diff --git a/.config/lychee.toml b/.config/lychee.toml index 200521ac41ee..733b77ec0cff 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -2,9 +2,9 @@ # Run with `lychee -c .config/lychee.toml ./**/*.rs ./**/*.prdoc` cache = true -max_cache_age = "1d" +max_cache_age = "10d" max_redirects = 10 -max_retries = 6 +max_retries = 3 # Exclude localhost et.al. exclude_all_private = true @@ -51,4 +51,7 @@ exclude = [ # Behind a captcha (code 403): "https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/", "https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/", + # 403 rate limited: + "https://etherscan.io/block/11090290", + "https://substrate.stackexchange.com/.*", ] diff --git a/.github/workflows/auto-add-parachain-issues.yml b/.github/workflows/auto-add-parachain-issues.yml new file mode 100644 index 000000000000..6b5222b6ff74 --- /dev/null +++ b/.github/workflows/auto-add-parachain-issues.yml @@ -0,0 +1,30 @@ +# If there are new issues related to the async backing feature, +# add it to the parachain team's board and set a custom "meta" field. + +name: Add selected issues to Parachain team board +on: + issues: + types: + - labeled + +jobs: + add-parachain-issues: + if: github.event.label.name == 'T16-async_backing' + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@v2.1.0 + with: + app_id: ${{ secrets.PROJECT_APP_ID }} + private_key: ${{ secrets.PROJECT_APP_KEY }} + - name: Sync issues + uses: paritytech/github-issue-sync@v0.3.2 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PROJECT_TOKEN: ${{ steps.generate_token.outputs.token }} + project: 119 # Parachain team board + project_field: 'meta' + project_value: 'async backing' + labels: | + T16-async_backing diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 8b23dd30bb29..7f7d9d362782 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -21,6 +21,43 @@ jobs: - name: Skip merge queue if: ${{ contains(github.ref, 'gh-readonly-queue') }} run: exit 0 + - name: Get PR data + id: comments + run: | + echo "bodies=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT" + echo "reviews=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews --jq '[.[].state]')" >> "$GITHUB_OUTPUT" + env: + GH_TOKEN: ${{ github.token }} + - name: Fail when author pushes new code + # Require new reviews when the author is pushing and he is not a member + if: | + contains(fromJson(steps.comments.outputs.reviews), 'APPROVED') && + github.event_name == 'pull_request_target' && + github.event.action == 'synchronize' && + github.event.sender.login == github.event.pull_request.user.login && + github.event.pull_request.author_association != 'CONTRIBUTOR' && + github.event.pull_request.author_association != 'MEMBER' + run: | + echo "User's association is ${{ github.event.pull_request.author_association }}" + # We get the list of reviewers who approved the PR + REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews \ + --jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}') + + # We request them to review again + echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input - + + echo 
"::error::Project needs to be reviewed again" + exit 1 + env: + GH_TOKEN: ${{ github.token }} + - name: Comment requirements + # If the previous step failed and github-actions hasn't commented yet we comment instructions + if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed') + run: | + gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed" + env: + GH_TOKEN: ${{ github.token }} + COMMENTS: ${{ steps.comments.outputs.users }} - name: Get PR number env: PR_NUMBER: ${{ github.event.pull_request.number }} diff --git a/.github/workflows/sync-templates.yml b/.github/workflows/sync-templates.yml new file mode 100644 index 000000000000..511c9d0e8cd0 --- /dev/null +++ b/.github/workflows/sync-templates.yml @@ -0,0 +1,159 @@ +name: Synchronize templates + + +# This job is used to keep the repository templates up-to-date. +# The code of the templates exist inside the monorepo, and upon releases we synchronize the repositories: +# - https://github.com/paritytech/polkadot-sdk-minimal-template +# - https://github.com/paritytech/polkadot-sdk-parachain-template +# - https://github.com/paritytech/polkadot-sdk-solochain-template +# +# The job moves the template code out of the monorepo, +# replaces any references to the monorepo workspace using psvm and toml-cli, +# checks that it builds successfully, +# and commits and pushes the result to each respective repository. +# If the build fails, a PR is created instead for manual inspection. + + +on: + # A manual dispatch for now - automatic on releases later. + workflow_dispatch: + inputs: + crate_release_version: + description: 'A release version to use, e.g. 1.9.0' + required: true + + +jobs: + sync-templates: + runs-on: ubuntu-latest + environment: master + strategy: + fail-fast: false + matrix: + template: ["minimal", "solochain", "parachain"] + env: + template-path: "polkadot-sdk-${{ matrix.template }}-template" + steps: + + # 1. Prerequisites. + + - name: Configure git identity + run: | + git config --global user.name "Template Bot" + git config --global user.email "163342540+paritytech-polkadotsdk-templatebot[bot]@users.noreply.github.com" + - uses: actions/checkout@v3 + with: + path: polkadot-sdk + ref: "release-crates-io-v${{ github.event.inputs.crate_release_version }}" + - name: Generate a token for the template repository + id: app_token + uses: actions/create-github-app-token@v1.9.3 + with: + owner: "paritytech" + repositories: "polkadot-sdk-${{ matrix.template }}-template" + app-id: ${{ secrets.TEMPLATE_APP_ID }} + private-key: ${{ secrets.TEMPLATE_APP_KEY }} + - uses: actions/checkout@v3 + with: + repository: "paritytech/polkadot-sdk-${{ matrix.template }}-template" + path: "${{ env.template-path }}" + token: ${{ steps.app_token.outputs.token }} + - name: Install toml-cli + run: cargo install --git https://github.com/gnprice/toml-cli --rev ea69e9d2ca4f0f858110dc7a5ae28bcb918c07fb # v0.2.3 + - name: Install Polkadot SDK Version Manager + run: cargo install --git https://github.com/paritytech/psvm --rev c41261ffb52ab0c115adbbdb17e2cb7900d2bdfd psvm # master + - name: Rust compilation prerequisites + run: | + sudo apt update + sudo apt install -y \ + protobuf-compiler + rustup target add wasm32-unknown-unknown + rustup component add rustfmt clippy rust-src + + # 2. Yanking the template out of the monorepo workspace. 
+ + - name: Use psvm to replace git references with released creates. + run: find . -type f -name 'Cargo.toml' -exec psvm -o -v ${{ github.event.inputs.crate_release_version }} -p {} \; + working-directory: polkadot-sdk/templates/${{ matrix.template }}/ + - name: Create a new workspace Cargo.toml + run: | + cat << EOF > Cargo.toml + [workspace.package] + license = "MIT-0" + authors = ["Parity Technologies "] + homepage = "https://substrate.io" + + [workspace] + members = [ + "node", + "pallets/template", + "runtime", + ] + resolver = "2" + EOF + shell: bash + working-directory: polkadot-sdk/templates/${{ matrix.template }}/ + - name: Update workspace configuration + run: | + set -euo pipefail + # toml-cli has no overwrite functionality yet, so we use temporary files. + # We cannot pipe the output straight to the same file while the CLI still reads and processes it. + + toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.repository' "https://github.com/paritytech/polkadot-sdk-${{ matrix.template }}-template.git" > Cargo.temp + mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml + + toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.edition' "$(toml get --raw Cargo.toml 'workspace.package.edition')" > Cargo.temp + mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml + + toml get Cargo.toml 'workspace.lints' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml + + toml get Cargo.toml 'workspace.dependencies' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml + working-directory: polkadot-sdk + - name: Print the result Cargo.tomls for debugging + if: runner.debug == '1' + run: find . -type f -name 'Cargo.toml' -exec cat {} \; + working-directory: polkadot-sdk/templates/${{ matrix.template }}/ + + - name: Clean the destination repository + run: rm -rf ./* + working-directory: "${{ env.template-path }}" + - name: Copy over the new changes + run: | + cp -r polkadot-sdk/templates/${{ matrix.template }}/* "${{ env.template-path }}/" + + # 3. Verify the build. Push the changes or create a PR. + + # We've run into out-of-disk error when compiling in the next step, so we free up some space this way. + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # 1.3.1 + with: + android: true # This alone is a 12 GB save. + # We disable the rest because it caused some problems. (they're enabled by default) + # The Android removal is enough. + dotnet: false + haskell: false + large-packages: false + swap-storage: false + + - name: Check if it compiles + id: check-compilation + run: cargo check && cargo test + working-directory: "${{ env.template-path }}" + timeout-minutes: 90 + - name: Create PR on failure + if: failure() && steps.check-compilation.outcome == 'failure' + uses: peter-evans/create-pull-request@5b4a9f6a9e2af26e5f02351490b90d01eb8ec1e5 # v5 + with: + path: "${{ env.template-path }}" + token: ${{ steps.app_token.outputs.token }} + add-paths: | + ./* + title: "[Don't merge] Update the ${{ matrix.template }} template" + body: "The template has NOT been successfully built and needs to be inspected." + branch: "update-template/${{ github.event_name }}" + - name: Push changes + run: | + git add -A . 
+ git commit --allow-empty -m "Update template triggered by ${{ github.event_name }}" + git push + working-directory: "${{ env.template-path }}" diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 954df10bef01..d8f5d5832291 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -70,7 +70,9 @@ publish-subsystem-benchmarks: - .kubernetes-env - .publish-gh-pages-refs needs: - - job: subsystem-regression-tests + - job: subsystem-benchmark-availability-recovery + artifacts: true + - job: subsystem-benchmark-availability-distribution artifacts: true - job: publish-rustdoc artifacts: false @@ -109,7 +111,9 @@ trigger_workflow: needs: - job: publish-subsystem-benchmarks artifacts: false - - job: subsystem-regression-tests + - job: subsystem-benchmark-availability-recovery + artifacts: true + - job: subsystem-benchmark-availability-distribution artifacts: true script: - echo "Triggering workflow" diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index af16f5d2de7f..1d6efd7b9fd1 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -511,12 +511,12 @@ test-syscalls: fi allow_failure: false # this rarely triggers in practice -subsystem-regression-tests: +subsystem-benchmark-availability-recovery: stage: test artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" when: always - expire_in: 1 days + expire_in: 1 hour paths: - charts/ extends: @@ -524,8 +524,25 @@ subsystem-regression-tests: - .common-refs - .run-immediately script: - - cargo bench --profile=testnet -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks - - cargo bench --profile=testnet -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks + - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks + tags: + - benchmark + allow_failure: true + +subsystem-benchmark-availability-distribution: + stage: test + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 1 hour + paths: + - charts/ + extends: + - .docker-env + - .common-refs + - .run-immediately + script: + - cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks tags: - benchmark allow_failure: true diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index ba05c709a27b..6b72075c513b 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -161,6 +161,8 @@ zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: extends: - .zombienet-polkadot-common + variables: + FORCED_INFRA_INSTANCE: "spot-iops" script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" diff --git a/Cargo.lock b/Cargo.lock index c17b59c6af28..aeb8b2cdb190 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -809,6 +809,7 @@ dependencies = [ "parachains-common", "rococo-emulated-chain", "sp-core", + "staging-xcm", "testnet-parachains-constants", ] @@ -819,17 +820,22 @@ dependencies = [ "assert_matches", "asset-hub-rococo-runtime", "asset-test-utils", + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", "frame-support", "pallet-asset-conversion", "pallet-assets", "pallet-balances", 
"pallet-message-queue", + "pallet-treasury", + "pallet-utility", "pallet-xcm", "parachains-common", "parity-scale-codec", "penpal-runtime", + "polkadot-runtime-common", "rococo-runtime", + "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", "staging-xcm", @@ -865,6 +871,7 @@ dependencies = [ "hex-literal", "log", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", "pallet-aura", @@ -927,6 +934,7 @@ dependencies = [ "frame-support", "parachains-common", "sp-core", + "staging-xcm", "testnet-parachains-constants", "westend-emulated-chain", ] @@ -988,6 +996,7 @@ dependencies = [ "hex-literal", "log", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", "pallet-aura", @@ -2828,6 +2837,36 @@ dependencies = [ "testnet-parachains-constants", ] +[[package]] +name = "collectives-westend-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-hub-westend-runtime", + "collectives-westend-runtime", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", + "frame-support", + "pallet-asset-rate", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-treasury", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", + "testnet-parachains-constants", + "westend-runtime", + "westend-runtime-constants", + "westend-system-emulated-network", +] + [[package]] name = "collectives-westend-runtime" version = "3.0.0" @@ -7346,6 +7385,7 @@ dependencies = [ "node-primitives", "pallet-alliance", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", @@ -9497,6 +9537,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-assets", "pallet-balances", "parity-scale-codec", @@ -9510,6 +9551,27 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-asset-conversion-ops" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "parity-scale-codec", + "primitive-types", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", +] + [[package]] name = "pallet-asset-conversion-tx-payment" version = "10.0.0" @@ -9569,7 +9631,7 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "29.0.0" +version = "29.1.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -9934,7 +9996,9 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", + "pretty_assertions", "scale-info", "sp-api", "sp-arithmetic", @@ -12756,7 +12820,6 @@ dependencies = [ "sp-runtime", "substrate-build-script-utils", "thiserror", - "try-runtime-cli", ] [[package]] @@ -15771,6 +15834,7 @@ dependencies = [ "pallet-multisig", "pallet-nis", "pallet-offences", + "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", @@ -18980,7 +19044,6 @@ dependencies = [ "sp-timestamp", "substrate-build-script-utils", "substrate-frame-rpc-system", - "try-runtime-cli", ] [[package]] @@ -20247,7 +20310,6 @@ dependencies = [ "tempfile", "tokio", "tokio-util", - "try-runtime-cli", "wait-timeout", "wat", ] @@ -21933,47 +21995,6 @@ version = 
"0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] -name = "try-runtime-cli" -version = "0.38.0" -dependencies = [ - "assert_cmd", - "async-trait", - "clap 4.5.3", - "frame-remote-externalities", - "frame-try-runtime", - "hex", - "log", - "node-primitives", - "parity-scale-codec", - "regex", - "sc-cli", - "sc-executor", - "serde", - "serde_json", - "sp-api", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-core", - "sp-debug-derive 14.0.0", - "sp-externalities 0.25.0", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-rpc", - "sp-runtime", - "sp-state-machine", - "sp-timestamp", - "sp-transaction-storage-proof", - "sp-version", - "sp-weights", - "substrate-cli-test-utils", - "substrate-rpc-client", - "tempfile", - "tokio", - "zstd 0.12.4", -] - [[package]] name = "trybuild" version = "1.0.89" diff --git a/Cargo.toml b/Cargo.toml index 67c51bc1e173..42a6bc8abe1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,7 @@ members = [ "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend", "cumulus/parachains/integration-tests/emulated/tests/people/people-rococo", "cumulus/parachains/integration-tests/emulated/tests/people/people-westend", "cumulus/parachains/pallets/collective-content", @@ -300,6 +301,7 @@ members = [ "substrate/frame", "substrate/frame/alliance", "substrate/frame/asset-conversion", + "substrate/frame/asset-conversion/ops", "substrate/frame/asset-rate", "substrate/frame/assets", "substrate/frame/atomic-swap", @@ -507,7 +509,6 @@ members = [ "substrate/utils/frame/rpc/state-trie-migration-rpc", "substrate/utils/frame/rpc/support", "substrate/utils/frame/rpc/system", - "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs new file mode 100644 index 000000000000..4b0c052df800 --- /dev/null +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -0,0 +1,205 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Transaction extension that rejects bridge-related transactions, that include +//! obsolete (duplicated) data or do not pass some additional pallet-specific +//! checks. 
+ +use crate::messages_call_ext::MessagesCallSubType; +use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; +use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; +use sp_runtime::transaction_validity::TransactionValidity; + +/// A duplication of the `FilterCall` trait. +/// +/// We need this trait in order to be able to implement it for the messages pallet, +/// since the implementation is done outside of the pallet crate. +pub trait BridgeRuntimeFilterCall { + /// Checks if a runtime call is valid. + fn validate(call: &Call) -> TransactionValidity; +} + +impl BridgeRuntimeFilterCall for pallet_bridge_grandpa::Pallet +where + T: pallet_bridge_grandpa::Config, + T::RuntimeCall: GrandpaCallSubType, +{ + fn validate(call: &T::RuntimeCall) -> TransactionValidity { + GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) + } +} + +impl BridgeRuntimeFilterCall + for pallet_bridge_parachains::Pallet +where + T: pallet_bridge_parachains::Config, + T::RuntimeCall: ParachainsCallSubtype, +{ + fn validate(call: &T::RuntimeCall) -> TransactionValidity { + ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) + } +} + +impl, I: 'static> BridgeRuntimeFilterCall + for pallet_bridge_messages::Pallet +where + T::RuntimeCall: MessagesCallSubType, +{ + /// Validate messages in order to avoid "mining" messages delivery and delivery confirmation + /// transactions, that are delivering outdated messages/confirmations. Without this validation, + /// even honest relayers may lose their funds if there are multiple relays running and + /// submitting the same messages/confirmations. + fn validate(call: &T::RuntimeCall) -> TransactionValidity { + call.check_obsolete_call() + } +} + +/// Declares a runtime-specific `BridgeRejectObsoleteHeadersAndMessages` signed extension. +/// +/// ## Example +/// +/// ```nocompile +/// generate_bridge_reject_obsolete_headers_and_messages!{ +/// Call, AccountId +/// BridgeRococoGrandpa, BridgeRococoMessages, +/// BridgeRococoParachains +/// } +/// ``` +/// +/// The goal of this extension is to avoid "mining" transactions that provide outdated bridged +/// headers and messages. Without that extension, even honest relayers may lose their funds if +/// there are multiple relays running and submitting the same information. +#[macro_export] +macro_rules! 
generate_bridge_reject_obsolete_headers_and_messages { + ($call:ty, $account_id:ty, $($filter_call:ty),*) => { + #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] + pub struct BridgeRejectObsoleteHeadersAndMessages; + impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { + const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; + type AccountId = $account_id; + type Call = $call; + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> sp_std::result::Result< + (), + sp_runtime::transaction_validity::TransactionValidityError, + > { + Ok(()) + } + + fn validate( + &self, + _who: &Self::AccountId, + call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> sp_runtime::transaction_validity::TransactionValidity { + let valid = sp_runtime::transaction_validity::ValidTransaction::default(); + $( + let valid = valid + .combine_with(<$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<$call>>::validate(call)?); + )* + Ok(valid) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &sp_runtime::traits::DispatchInfoOf, + len: usize, + ) -> Result { + self.validate(who, call, info, len).map(drop) + } + } + }; +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::{assert_err, assert_ok}; + use sp_runtime::{ + traits::SignedExtension, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + }; + + pub struct MockCall { + data: u32, + } + + impl sp_runtime::traits::Dispatchable for MockCall { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> sp_runtime::DispatchResultWithInfo { + unimplemented!() + } + } + + struct FirstFilterCall; + impl BridgeRuntimeFilterCall for FirstFilterCall { + fn validate(call: &MockCall) -> TransactionValidity { + if call.data <= 1 { + return InvalidTransaction::Custom(1).into() + } + + Ok(ValidTransaction { priority: 1, ..Default::default() }) + } + } + + struct SecondFilterCall; + impl BridgeRuntimeFilterCall for SecondFilterCall { + fn validate(call: &MockCall) -> TransactionValidity { + if call.data <= 2 { + return InvalidTransaction::Custom(2).into() + } + + Ok(ValidTransaction { priority: 2, ..Default::default() }) + } + } + + #[test] + fn test() { + generate_bridge_reject_obsolete_headers_and_messages!( + MockCall, + (), + FirstFilterCall, + SecondFilterCall + ); + + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), + InvalidTransaction::Custom(1) + ); + + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), + InvalidTransaction::Custom(2) + ); + + assert_ok!( + BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), + ValidTransaction { priority: 3, ..Default::default() } + ) + } +} diff --git a/bridges/bin/runtime-common/src/extensions/mod.rs b/bridges/bin/runtime-common/src/extensions/mod.rs new file mode 100644 index 000000000000..3f1b506aaae3 --- /dev/null +++ b/bridges/bin/runtime-common/src/extensions/mod.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Bridge-specific transaction extensions. + +pub mod check_obsolete_extension; +pub mod priority_calculator; +pub mod refund_relayer_extension; diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs similarity index 100% rename from bridges/bin/runtime-common/src/priority_calculator.rs rename to bridges/bin/runtime-common/src/extensions/priority_calculator.rs diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs similarity index 99% rename from bridges/bin/runtime-common/src/refund_relayer_extension.rs rename to bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index 455392a0a277..64ae1d0b669f 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -23,7 +23,7 @@ use crate::messages_call_ext::{ CallHelper as MessagesCallHelper, CallInfo as MessagesCallInfo, MessagesCallSubType, }; use bp_messages::{LaneId, MessageNonce}; -use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; +use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::{Chain, Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider}; use codec::{Codec, Decode, Encode}; use frame_support::{ @@ -520,8 +520,9 @@ where } // compute priority boost - let priority_boost = - crate::priority_calculator::compute_priority_boost::(bundled_messages); + let priority_boost = crate::extensions::priority_calculator::compute_priority_boost::< + T::Priority, + >(bundled_messages); let valid_transaction = ValidTransactionBuilder::default().priority(priority_boost); log::trace!( @@ -588,7 +589,10 @@ where ); }, RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister(&relayer, slash_account), + RelayersPallet::::slash_and_deregister( + &relayer, + ExplicitOrAccountParams::Params(slash_account), + ), } Ok(()) diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index 2722f6f1c6d1..5679acd6006c 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -19,11 +19,7 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use crate::messages_call_ext::MessagesCallSubType; -use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; -use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; -use sp_runtime::transaction_validity::TransactionValidity; - +pub mod extensions; pub mod messages; pub mod messages_api; pub mod messages_benchmarking; @@ -31,8 +27,6 @@ pub mod messages_call_ext; pub mod messages_generation; pub mod messages_xcm_extension; pub mod parachains_benchmarking; -pub mod priority_calculator; -pub mod 
refund_relayer_extension; mod mock; @@ -40,184 +34,3 @@ mod mock; pub mod integrity; const LOG_TARGET_BRIDGE_DISPATCH: &str = "runtime::bridge-dispatch"; - -/// A duplication of the `FilterCall` trait. -/// -/// We need this trait in order to be able to implement it for the messages pallet, -/// since the implementation is done outside of the pallet crate. -pub trait BridgeRuntimeFilterCall { - /// Checks if a runtime call is valid. - fn validate(call: &Call) -> TransactionValidity; -} - -impl BridgeRuntimeFilterCall for pallet_bridge_grandpa::Pallet -where - T: pallet_bridge_grandpa::Config, - T::RuntimeCall: GrandpaCallSubType, -{ - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) - } -} - -impl BridgeRuntimeFilterCall - for pallet_bridge_parachains::Pallet -where - T: pallet_bridge_parachains::Config, - T::RuntimeCall: ParachainsCallSubtype, -{ - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) - } -} - -impl, I: 'static> BridgeRuntimeFilterCall - for pallet_bridge_messages::Pallet -where - T::RuntimeCall: MessagesCallSubType, -{ - /// Validate messages in order to avoid "mining" messages delivery and delivery confirmation - /// transactions, that are delivering outdated messages/confirmations. Without this validation, - /// even honest relayers may lose their funds if there are multiple relays running and - /// submitting the same messages/confirmations. - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - call.check_obsolete_call() - } -} - -/// Declares a runtime-specific `BridgeRejectObsoleteHeadersAndMessages` signed extension. -/// -/// ## Example -/// -/// ```nocompile -/// generate_bridge_reject_obsolete_headers_and_messages!{ -/// Call, AccountId -/// BridgeRococoGrandpa, BridgeRococoMessages, -/// BridgeRococoParachains -/// } -/// ``` -/// -/// The goal of this extension is to avoid "mining" transactions that provide outdated bridged -/// headers and messages. Without that extension, even honest relayers may lose their funds if -/// there are multiple relays running and submitting the same information. -#[macro_export] -macro_rules! 
generate_bridge_reject_obsolete_headers_and_messages { - ($call:ty, $account_id:ty, $($filter_call:ty),*) => { - #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] - pub struct BridgeRejectObsoleteHeadersAndMessages; - impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { - const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; - type AccountId = $account_id; - type Call = $call; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result< - (), - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } - - fn validate( - &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> sp_runtime::transaction_validity::TransactionValidity { - let valid = sp_runtime::transaction_validity::ValidTransaction::default(); - $( - let valid = valid - .combine_with(<$filter_call as $crate::BridgeRuntimeFilterCall<$call>>::validate(call)?); - )* - Ok(valid) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(drop) - } - } - }; -} - -#[cfg(test)] -mod tests { - use crate::BridgeRuntimeFilterCall; - use frame_support::{assert_err, assert_ok}; - use sp_runtime::{ - traits::SignedExtension, - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, - }; - - pub struct MockCall { - data: u32, - } - - impl sp_runtime::traits::Dispatchable for MockCall { - type RuntimeOrigin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch( - self, - _origin: Self::RuntimeOrigin, - ) -> sp_runtime::DispatchResultWithInfo { - unimplemented!() - } - } - - struct FirstFilterCall; - impl BridgeRuntimeFilterCall for FirstFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { - if call.data <= 1 { - return InvalidTransaction::Custom(1).into() - } - - Ok(ValidTransaction { priority: 1, ..Default::default() }) - } - } - - struct SecondFilterCall; - impl BridgeRuntimeFilterCall for SecondFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { - if call.data <= 2 { - return InvalidTransaction::Custom(2).into() - } - - Ok(ValidTransaction { priority: 2, ..Default::default() }) - } - } - - #[test] - fn test() { - generate_bridge_reject_obsolete_headers_and_messages!( - MockCall, - (), - FirstFilterCall, - SecondFilterCall - ); - - assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), - InvalidTransaction::Custom(1) - ); - - assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), - InvalidTransaction::Custom(2) - ); - - assert_ok!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), - ValidTransaction { priority: 3, ..Default::default() } - ) - } -} diff --git a/bridges/modules/relayers/src/benchmarking.rs b/bridges/modules/relayers/src/benchmarking.rs index 00c3814a4c38..ca312d44edfd 100644 --- a/bridges/modules/relayers/src/benchmarking.rs +++ b/bridges/modules/relayers/src/benchmarking.rs @@ -106,7 +106,7 @@ benchmarks! 
{ let slash_destination = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); T::prepare_rewards_account(slash_destination, Zero::zero()); }: { - crate::Pallet::::slash_and_deregister(&relayer, slash_destination) + crate::Pallet::::slash_and_deregister(&relayer, slash_destination.into()) } verify { assert!(!crate::Pallet::::is_registration_active(&relayer)); diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index ce66c9df48e0..7a3a0f9ea94c 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -21,7 +21,8 @@ #![warn(missing_docs)] use bp_relayers::{ - PaymentProcedure, Registration, RelayerRewardsKeyProvider, RewardsAccountParams, StakeAndSlash, + ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider, + RewardsAccountParams, StakeAndSlash, }; use bp_runtime::StorageDoubleMapKeyProvider; use frame_support::fail; @@ -242,7 +243,7 @@ pub mod pallet { /// It may fail inside, but error is swallowed and we only log it. pub fn slash_and_deregister( relayer: &T::AccountId, - slash_destination: RewardsAccountParams, + slash_destination: ExplicitOrAccountParams, ) { let registration = match RegisteredRelayers::::take(relayer) { Some(registration) => registration, @@ -259,7 +260,7 @@ pub mod pallet { match T::StakeAndSlash::repatriate_reserved( relayer, - slash_destination, + slash_destination.clone(), registration.stake, ) { Ok(failed_to_slash) if failed_to_slash.is_zero() => { diff --git a/bridges/modules/relayers/src/stake_adapter.rs b/bridges/modules/relayers/src/stake_adapter.rs index 88af9b1877bf..7ba90d91dfd9 100644 --- a/bridges/modules/relayers/src/stake_adapter.rs +++ b/bridges/modules/relayers/src/stake_adapter.rs @@ -17,7 +17,7 @@ //! Code that allows `NamedReservableCurrency` to be used as a `StakeAndSlash` //! mechanism of the relayers pallet. 
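An illustrative aside before the `stake_adapter.rs` diff below: `repatriate_reserved` now takes an `ExplicitOrAccountParams` beneficiary, so the slash destination can be either a concrete account id or a `RewardsAccountParams` value that still has to be turned into one. A simplified, self-contained mirror of that resolution step (the types here are hypothetical stand-ins, not the real `bp-relayers` definitions):

// Illustrative stand-ins for the real `bp-relayers` types.
#[derive(Clone, Debug)]
struct RewardsAccountParams; // the real type carries lane id, chain id and owner

#[derive(Clone, Debug)]
enum ExplicitOrAccountParams<AccountId> {
    /// Use this account directly as the slash beneficiary.
    Explicit(AccountId),
    /// Derive the beneficiary account from the rewards-account parameters.
    Params(RewardsAccountParams),
}

impl<AccountId> From<RewardsAccountParams> for ExplicitOrAccountParams<AccountId> {
    fn from(params: RewardsAccountParams) -> Self {
        ExplicitOrAccountParams::Params(params)
    }
}

// Mirrors the `match` that the patch adds to `repatriate_reserved`: explicit
// accounts are used as-is, derived ones go through a rewards-account lookup
// (stubbed here with a constant).
fn resolve_beneficiary(dest: ExplicitOrAccountParams<u64>) -> u64 {
    match dest {
        ExplicitOrAccountParams::Explicit(account) => account,
        ExplicitOrAccountParams::Params(_params) => 42,
    }
}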
-use bp_relayers::{PayRewardFromAccount, RewardsAccountParams, StakeAndSlash}; +use bp_relayers::{ExplicitOrAccountParams, PayRewardFromAccount, StakeAndSlash}; use codec::Codec; use frame_support::traits::{tokens::BalanceStatus, NamedReservableCurrency}; use sp_runtime::{traits::Get, DispatchError, DispatchResult}; @@ -55,11 +55,14 @@ where fn repatriate_reserved( relayer: &AccountId, - beneficiary: RewardsAccountParams, + beneficiary: ExplicitOrAccountParams, amount: Currency::Balance, ) -> Result { - let beneficiary_account = - PayRewardFromAccount::<(), AccountId>::rewards_account(beneficiary); + let beneficiary_account = match beneficiary { + ExplicitOrAccountParams::Explicit(account) => account, + ExplicitOrAccountParams::Params(params) => + PayRewardFromAccount::<(), AccountId>::rewards_account(params), + }; Currency::repatriate_reserved_named( &ReserveId::get(), relayer, @@ -134,7 +137,11 @@ mod tests { Balances::mint_into(&beneficiary_account, expected_balance).unwrap(); assert_eq!( - TestStakeAndSlash::repatriate_reserved(&1, beneficiary, test_stake()), + TestStakeAndSlash::repatriate_reserved( + &1, + ExplicitOrAccountParams::Params(beneficiary), + test_stake() + ), Ok(test_stake()) ); assert_eq!(Balances::free_balance(1), 0); @@ -146,7 +153,11 @@ mod tests { Balances::mint_into(&2, test_stake() * 2).unwrap(); TestStakeAndSlash::reserve(&2, test_stake() / 3).unwrap(); assert_eq!( - TestStakeAndSlash::repatriate_reserved(&2, beneficiary, test_stake()), + TestStakeAndSlash::repatriate_reserved( + &2, + ExplicitOrAccountParams::Params(beneficiary), + test_stake() + ), Ok(test_stake() - test_stake() / 3) ); assert_eq!(Balances::free_balance(2), test_stake() * 2 - test_stake() / 3); @@ -158,7 +169,11 @@ mod tests { Balances::mint_into(&3, test_stake() * 2).unwrap(); TestStakeAndSlash::reserve(&3, test_stake()).unwrap(); assert_eq!( - TestStakeAndSlash::repatriate_reserved(&3, beneficiary, test_stake()), + TestStakeAndSlash::repatriate_reserved( + &3, + ExplicitOrAccountParams::Params(beneficiary), + test_stake() + ), Ok(0) ); assert_eq!(Balances::free_balance(3), test_stake()); @@ -176,7 +191,12 @@ mod tests { Balances::mint_into(&3, test_stake() * 2).unwrap(); TestStakeAndSlash::reserve(&3, test_stake()).unwrap(); - assert!(TestStakeAndSlash::repatriate_reserved(&3, beneficiary, test_stake()).is_err()); + assert!(TestStakeAndSlash::repatriate_reserved( + &3, + ExplicitOrAccountParams::Params(beneficiary), + test_stake() + ) + .is_err()); assert_eq!(Balances::free_balance(3), test_stake()); assert_eq!(Balances::reserved_balance(3), test_stake()); assert_eq!(Balances::free_balance(beneficiary_account), 0); diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index 5d0be41b1b55..ece72ac8494b 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -191,6 +191,10 @@ pub mod pallet { impl, I: 'static> Pallet { /// Called when new message is sent (queued to local outbound XCM queue) over the bridge. 
pub(crate) fn on_message_sent_to_bridge(message_size: u32) { + log::trace!( + target: LOG_TARGET, + "on_message_sent_to_bridge - message_size: {message_size:?}", + ); let _ = Bridge::::try_mutate(|bridge| { let is_channel_with_bridge_hub_congested = T::WithBridgeHubChannel::is_congested(); let is_bridge_congested = bridge.is_congested; @@ -238,14 +242,16 @@ impl, I: 'static> ExporterFor for Pallet { remote_location: &InteriorLocation, message: &Xcm<()>, ) -> Option<(Location, Option)> { + log::trace!( + target: LOG_TARGET, + "exporter_for - network: {network:?}, remote_location: {remote_location:?}, msg: {message:?}", + ); // ensure that the message is sent to the expected bridged network (if specified). if let Some(bridged_network) = T::BridgedNetworkId::get() { if *network != bridged_network { log::trace!( target: LOG_TARGET, - "Router with bridged_network_id {:?} does not support bridging to network {:?}!", - bridged_network, - network, + "Router with bridged_network_id {bridged_network:?} does not support bridging to network {network:?}!", ); return None } @@ -300,7 +306,7 @@ impl, I: 'static> ExporterFor for Pallet { log::info!( target: LOG_TARGET, - "Going to send message to {:?} ({} bytes) over bridge. Computed bridge fee {:?} using fee factor {}", + "Validate send message to {:?} ({} bytes) over bridge. Computed bridge fee {:?} using fee factor {}", (network, remote_location), message_size, fee, @@ -321,40 +327,59 @@ impl, I: 'static> SendXcm for Pallet { dest: &mut Option, xcm: &mut Option>, ) -> SendResult { - // `dest` and `xcm` are required here - let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?; - let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?; - - // we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute - // everything required here - let message_size = xcm_ref.encoded_size() as _; - - // bridge doesn't support oversized/overweight messages now. So it is better to drop such - // messages here than at the bridge hub. Let's check the message size. - if message_size > HARD_MESSAGE_SIZE_LIMIT { - return Err(SendError::ExceedsMaxMessageSize) - } + log::trace!(target: LOG_TARGET, "validate - msg: {xcm:?}, destination: {dest:?}"); + + // In case of success, the `ViaBridgeHubExporter` can modify XCM instructions and consume + // `dest` / `xcm`, so we retain the clone of original message and the destination for later + // `DestinationVersion` validation. + let xcm_to_dest_clone = xcm.clone(); + let dest_clone = dest.clone(); + + // First, use the inner exporter to validate the destination to determine if it is even + // routable. If it is not, return an error. If it is, then the XCM is extended with + // instructions to pay the message fee at the sibling/child bridge hub. The cost will + // include both the cost of (1) delivery to the sibling bridge hub (returned by + // `Config::ToBridgeHubSender`) and (2) delivery to the bridged bridge hub (returned by + // `Self::exporter_for`). + match ViaBridgeHubExporter::::validate(dest, xcm) { + Ok((ticket, cost)) => { + // If the ticket is ok, it means we are routing with this router, so we need to + // apply more validations to the cloned `dest` and `xcm`, which are required here. + let xcm_to_dest_clone = xcm_to_dest_clone.ok_or(SendError::MissingArgument)?; + let dest_clone = dest_clone.ok_or(SendError::MissingArgument)?; + + // We won't have access to `dest` and `xcm` in the `deliver` method, so we need to + // precompute everything required here. 
However, `dest` and `xcm` were consumed by + // `ViaBridgeHubExporter`, so we need to use their clones. + let message_size = xcm_to_dest_clone.encoded_size() as _; + + // The bridge doesn't support oversized or overweight messages. Therefore, it's + // better to drop such messages here rather than at the bridge hub. Let's check the + // message size." + if message_size > HARD_MESSAGE_SIZE_LIMIT { + return Err(SendError::ExceedsMaxMessageSize) + } - // We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm` - // program. This may seem like an additional, unnecessary check, but it is not. A similar - // check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a - // versioned message to the sibling bridge hub. However, the local bridge hub may have a - // higher XCM version than the remote `dest`. Once again, it is better to discard such - // messages here than at the bridge hub (e.g., to avoid losing funds). - let destination_version = T::DestinationVersion::get_version_for(dest_ref) - .ok_or(SendError::DestinationUnsupported)?; - let _ = VersionedXcm::from(xcm_ref.clone()) - .into_version(destination_version) - .map_err(|()| SendError::DestinationUnsupported)?; - - // just use exporter to validate destination and insert instructions to pay message fee - // at the sibling/child bridge hub - // - // the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by - // the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by - // `Self::exporter_for`) - ViaBridgeHubExporter::::validate(dest, xcm) - .map(|(ticket, cost)| ((message_size, ticket), cost)) + // We need to ensure that the known `dest`'s XCM version can comprehend the current + // `xcm` program. This may seem like an additional, unnecessary check, but it is + // not. A similar check is probably performed by the `ViaBridgeHubExporter`, which + // attempts to send a versioned message to the sibling bridge hub. However, the + // local bridge hub may have a higher XCM version than the remote `dest`. Once + // again, it is better to discard such messages here than at the bridge hub (e.g., + // to avoid losing funds). 
+ let destination_version = T::DestinationVersion::get_version_for(&dest_clone) + .ok_or(SendError::DestinationUnsupported)?; + let _ = VersionedXcm::from(xcm_to_dest_clone) + .into_version(destination_version) + .map_err(|()| SendError::DestinationUnsupported)?; + + Ok(((message_size, ticket), cost)) + }, + Err(e) => { + log::trace!(target: LOG_TARGET, "validate - ViaBridgeHubExporter - error: {e:?}"); + Err(e) + }, + } } fn deliver(ticket: Self::Ticket) -> Result { @@ -366,6 +391,7 @@ impl, I: 'static> SendXcm for Pallet { // increase delivery fee factor if required Self::on_message_sent_to_bridge(message_size); + log::trace!(target: LOG_TARGET, "deliver - message sent, xcm_hash: {xcm_hash:?}"); Ok(xcm_hash) } } @@ -444,24 +470,51 @@ mod tests { #[test] fn not_applicable_if_destination_is_within_other_network() { run_test(|| { + // unroutable dest + let dest = Location::new(2, [GlobalConsensus(ByGenesis([0; 32])), Parachain(1000)]); + let xcm: Xcm<()> = vec![ClearOrigin].into(); + + // check that router does not consume when `NotApplicable` + let mut xcm_wrapper = Some(xcm.clone()); assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![].into(), - ), + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), Err(SendError::NotApplicable), ); + // XCM is NOT consumed and untouched + assert_eq!(Some(xcm.clone()), xcm_wrapper); + + // check the full `send_xcm` + assert_eq!(send_xcm::(dest, xcm,), Err(SendError::NotApplicable),); }); } #[test] fn exceeds_max_message_size_if_size_is_above_hard_limit() { run_test(|| { + // routable dest with XCM version + let dest = + Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]); + // oversized XCM + let xcm: Xcm<()> = vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(); + + // dest is routable with the inner router + assert_ok!(ViaBridgeHubExporter::::validate( + &mut Some(dest.clone()), + &mut Some(xcm.clone()) + )); + + // check for oversized message + let mut xcm_wrapper = Some(xcm.clone()); + assert_eq!( + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), + Err(SendError::ExceedsMaxMessageSize), + ); + // XCM is consumed by the inner router + assert!(xcm_wrapper.is_none()); + + // check the full `send_xcm` assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(), - ), + send_xcm::(dest, xcm,), Err(SendError::ExceedsMaxMessageSize), ); }); @@ -470,11 +523,28 @@ mod tests { #[test] fn destination_unsupported_if_wrap_version_fails() { run_test(|| { + // routable dest but we don't know XCM version + let dest = UnknownXcmVersionForRoutableLocation::get(); + let xcm: Xcm<()> = vec![ClearOrigin].into(); + + // dest is routable with the inner router + assert_ok!(ViaBridgeHubExporter::::validate( + &mut Some(dest.clone()), + &mut Some(xcm.clone()) + )); + + // check that it does not pass XCM version check + let mut xcm_wrapper = Some(xcm.clone()); + assert_eq!( + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), + Err(SendError::DestinationUnsupported), + ); + // XCM is consumed by the inner router + assert!(xcm_wrapper.is_none()); + + // check the full `send_xcm` assert_eq!( - send_xcm::( - UnknownXcmVersionLocation::get(), - vec![ClearOrigin].into(), - ), + send_xcm::(dest, xcm,), Err(SendError::DestinationUnsupported), ); }); diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs 
b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 54e10966d51b..20c86d1da9a2 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -61,7 +61,7 @@ parameter_types! { Some((BridgeFeeAsset::get(), BASE_FEE).into()) ) ]; - pub UnknownXcmVersionLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]); + pub UnknownXcmVersionForRoutableLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]); } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -76,7 +76,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type BridgedNetworkId = BridgedNetworkId; type Bridges = NetworkExportTable; type DestinationVersion = - LatestOrNoneForLocationVersionChecker>; + LatestOrNoneForLocationVersionChecker>; type BridgeHubOrigin = EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index c808c437b54c..2a9ef6a8e1e9 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -pub use registration::{Registration, StakeAndSlash}; +pub use registration::{ExplicitOrAccountParams, Registration, StakeAndSlash}; use bp_messages::LaneId; use bp_runtime::{ChainId, StorageDoubleMapKeyProvider}; diff --git a/bridges/primitives/relayers/src/registration.rs b/bridges/primitives/relayers/src/registration.rs index bc2d0d127aef..9d9b7e481220 100644 --- a/bridges/primitives/relayers/src/registration.rs +++ b/bridges/primitives/relayers/src/registration.rs @@ -21,7 +21,7 @@ //! required finality proofs). This extension boosts priority of message delivery //! transactions, based on the number of bundled messages. So transaction with more //! messages has larger priority than the transaction with less messages. -//! See `bridge_runtime_common::priority_calculator` for details; +//! See `bridge_runtime_common::extensions::priority_calculator` for details; //! //! This encourages relayers to include more messages to their delivery transactions. //! At the same time, we are not verifying storage proofs before boosting @@ -46,6 +46,21 @@ use sp_runtime::{ DispatchError, DispatchResult, }; +/// Either explicit account reference or `RewardsAccountParams`. +#[derive(Clone, Debug)] +pub enum ExplicitOrAccountParams { + /// Explicit account reference. + Explicit(AccountId), + /// Account, referenced using `RewardsAccountParams`. + Params(RewardsAccountParams), +} + +impl From for ExplicitOrAccountParams { + fn from(params: RewardsAccountParams) -> Self { + ExplicitOrAccountParams::Params(params) + } +} + /// Relayer registration. #[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] pub struct Registration { @@ -90,7 +105,7 @@ pub trait StakeAndSlash { /// Returns `Ok(_)` with non-zero balance if we have failed to repatriate some portion of stake. 
fn repatriate_reserved( relayer: &AccountId, - beneficiary: RewardsAccountParams, + beneficiary: ExplicitOrAccountParams, amount: Balance, ) -> Result; } @@ -113,7 +128,7 @@ where fn repatriate_reserved( _relayer: &AccountId, - _beneficiary: RewardsAccountParams, + _beneficiary: ExplicitOrAccountParams, _amount: Balance, ) -> Result { Ok(Zero::zero()) diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs index 137deb5b74f7..f18c43cc7f0e 100644 --- a/bridges/relays/messages/src/message_race_delivery.rs +++ b/bridges/relays/messages/src/message_race_delivery.rs @@ -446,7 +446,7 @@ where )) } - /// Returns lastest confirmed message at source chain, given source block. + /// Returns latest confirmed message at source chain, given source block. fn latest_confirmed_nonce_at_source(&self, at: &SourceHeaderIdOf
<P>
) -> Option { self.latest_confirmed_nonces_at_source .iter() diff --git a/bridges/relays/messages/src/message_race_strategy.rs b/bridges/relays/messages/src/message_race_strategy.rs index 93d178e55b04..3a532331d79d 100644 --- a/bridges/relays/messages/src/message_race_strategy.rs +++ b/bridges/relays/messages/src/message_race_strategy.rs @@ -567,7 +567,7 @@ mod tests { let source_header_1 = header_id(1); let target_header_1 = header_id(1); - // we start in perfec sync state - all headers are synced and finalized on both ends + // we start in perfect sync state - all headers are synced and finalized on both ends let mut state = TestRaceStateImpl { best_finalized_source_header_id_at_source: Some(source_header_1), best_finalized_source_header_id_at_best_target: Some(source_header_1), diff --git a/bridges/relays/parachains/README.md b/bridges/relays/parachains/README.md index 9043b0b0a9cd..f24e7a4c5d30 100644 --- a/bridges/relays/parachains/README.md +++ b/bridges/relays/parachains/README.md @@ -23,7 +23,7 @@ to return the best known head of given parachain. When required, it must be able finality delivery transaction to the target node. The main entrypoint for the crate is the [`run` function](./src/parachains_loop.rs), which takes source and target -clients and [`ParachainSyncParams`](./src/parachains_loop.rs) parameters. The most imporant parameter is the +clients and [`ParachainSyncParams`](./src/parachains_loop.rs) parameters. The most important parameter is the `parachains` - it is the set of parachains, which relay tracks and updates. The other important parameter that may affect the relay operational costs is the `strategy`. If it is set to `Any`, then the finality delivery transaction is submitted if at least one of tracked parachain heads is updated. The other option is `All`. 
Then diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 66c9ddc037b8..41aa862be576 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -212,19 +212,19 @@ case "$1" in "ws://127.0.0.1:8943" \ "//Alice" \ "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000000 + 50000000000 * 20)) + 100000000000000 # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain" \ - $((1000000000000 + 2000000000000)) + 100000000000000 # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain" \ - $((1000000000000 + 2000000000000)) + 100000000000000 # set XCM version of remote BridgeHubWestend force_xcm_version \ "ws://127.0.0.1:9942" \ @@ -270,19 +270,19 @@ case "$1" in "ws://127.0.0.1:8945" \ "//Alice" \ "$ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND" \ - $((1000000000000000 + 50000000000 * 20)) + 100000000000000 # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain" \ - $((1000000000000000 + 2000000000000)) + 100000000000000 # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain" \ - $((1000000000000000 + 2000000000000)) + 100000000000000 # set XCM version of remote BridgeHubRococo force_xcm_version \ "ws://127.0.0.1:9945" \ diff --git a/bridges/testing/framework/utils/bridges.sh b/bridges/testing/framework/utils/bridges.sh index 7c8399461584..07d9e4cd50b1 100755 --- a/bridges/testing/framework/utils/bridges.sh +++ b/bridges/testing/framework/utils/bridges.sh @@ -53,7 +53,7 @@ function call_polkadot_js_api() { # With it, it just submits it to the tx pool and exits. # --nonce -1: means to compute transaction nonce using `system_accountNextIndex` RPC, which includes all # transaction that are in the tx pool. - polkadot-js-api --noWait --nonce -1 "$@" + polkadot-js-api --nonce -1 "$@" || true } function generate_hex_encoded_call_data() { diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh index 3a604b3876d9..32419dc84f59 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh @@ -24,12 +24,6 @@ echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 ${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid -# Sometimes the relayer syncs multiple parachain heads in the beginning leading to test failures. -# See issue: https://github.com/paritytech/parity-bridges-common/issues/2838. -# TODO: Remove this sleep after the issue is fixed. 
-echo -e "Sleeping 180s before runing the tests ...\n" -sleep 180 - run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir run_zndsl ${BASH_SOURCE%/*}/westend-to-rococo.zndsl $rococo_dir diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 2b774128c1fb..3fe87e94b7b9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -315,9 +315,10 @@ where let mut parent_header = initial_parent.header; let overseer_handle = &mut params.overseer_handle; - // We mainly call this to inform users at genesis if there is a mismatch with the - // on-chain data. - collator.collator_service().check_block_status(parent_hash, &parent_header); + // Do not try to build upon an unknown, pruned or bad block + if !collator.collator_service().check_block_status(parent_hash, &parent_header) { + continue + } // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index aa5e67e453f6..06f19941165a 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -451,6 +451,17 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result>, ApiError> { Ok(self.rpc_client.parachain_host_claim_queue(at).await?) } + + async fn candidates_pending_availability( + &self, + at: Hash, + para_id: cumulus_primitives_core::ParaId, + ) -> Result>, sp_api::ApiError> { + Ok(self + .rpc_client + .parachain_host_candidates_pending_availability(at, para_id) + .await?) + } } #[async_trait::async_trait] diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 547803865c28..864ce6c57125 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -655,6 +655,20 @@ impl RelayChainRpcClient { .await } + /// Get the receipt of all candidates pending availability. 
+ pub async fn parachain_host_candidates_pending_availability( + &self, + at: RelayHash, + para_id: ParaId, + ) -> Result, RelayChainError> { + self.call_remote_runtime_function( + "ParachainHost_candidates_pending_availability", + at, + Some(para_id), + ) + .await + } + pub async fn validation_code_hash( &self, at: RelayHash, diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index b4cd925d540e..deced13a9e81 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -942,7 +942,10 @@ impl SendXcm for Pallet { Self::deposit_event(Event::XcmpMessageSent { message_hash: hash }); Ok(hash) }, - Err(e) => Err(SendError::Transport(e.into())), + Err(e) => { + log::error!(target: LOG_TARGET, "Deliver error: {e:?}"); + Err(SendError::Transport(e.into())) + }, } } } diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 377870f9e2b3..8f096fa6a962 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -7,7 +7,9 @@ "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM", "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", - "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3" + "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", + "/dns/coretime-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw", + "/dns/coretime-westend-boot-ng.dwellir.com/tcp/30356/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index aa1622d1dbd4..6dd8579cf257 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -7,14 +7,24 @@ "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", - "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", - "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", - "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", - "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", + "/dns/people-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWHb7bp7fvxCwR1i6m8xn4j1ZSVZ6a49TVYbrWSC2sJhn4", + 
"/dns/people-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWHb7bp7fvxCwR1i6m8xn4j1ZSVZ6a49TVYbrWSC2sJhn4", "/dns/boot-node.helikon.io/tcp/9520/p2p/12D3KooWHhZk21Wzvsd3Un1Cp63diXqr6idbG1MEiUWaitUZuX4c", - "/dns/boot-node.helikon.io/tcp/9522/wss/p2p/12D3KooWHhZk21Wzvsd3Un1Cp63diXqr6idbG1MEiUWaitUZuX4c" + "/dns/boot-node.helikon.io/tcp/9522/wss/p2p/12D3KooWHhZk21Wzvsd3Un1Cp63diXqr6idbG1MEiUWaitUZuX4c", + "/dns/boot.metaspan.io/tcp/35068/p2p/12D3KooWAtw8ybFXNmNdTUsvt2gfKwtuea9wDQT2b8FpbVNKYGwc", + "/dns/boot.metaspan.io/tcp/35069/wss/p2p/12D3KooWAtw8ybFXNmNdTUsvt2gfKwtuea9wDQT2b8FpbVNKYGwc", + "/dns/boot.stake.plus/tcp/46333/p2p/12D3KooWLNWUF4H5WE3dy2rPB56gVcR48XY2rHwEaZ6pGTK6HYFi", + "/dns/boot.stake.plus/tcp/46334/wss/p2p/12D3KooWLNWUF4H5WE3dy2rPB56gVcR48XY2rHwEaZ6pGTK6HYFi", + "/dns/boot.gatotech.network/tcp/33340/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", + "/dns/boot.gatotech.network/tcp/35340/wss/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", + "/dns/people-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", + "/dns/people-westend.bootnode.amforc.com/tcp/30346/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", + "/dns/people-westend-bootnode.turboflakes.io/tcp/30650/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", + "/dns/people-westend-bootnode.turboflakes.io/tcp/30750/wss/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", + "/dns/people-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", + "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index 98762beb0cb2..8100e6813488 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -23,3 +23,6 @@ emulated-integration-tests-common = { path = "../../../../common", default-featu asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } rococo-emulated-chain = { path = "../../../relays/rococo" } testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index f1e972e869dc..202d02b250bb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -22,7 +22,8 @@ use frame_support::traits::OnInitialize; use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, impl_assets_helpers_for_parachain, impl_assets_helpers_for_system_parachain, - impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, + impl_foreign_assets_helpers_for_parachain, 
impl_xcm_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -56,4 +57,5 @@ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo); impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); impl_assets_helpers_for_parachain!(AssetHubRococo); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v3::Location); impl_xcm_helpers_for_parachain!(AssetHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index a42a9abf618d..e0abaa66c5ca 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -23,3 +23,6 @@ emulated-integration-tests-common = { path = "../../../../common", default-featu asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } westend-emulated-chain = { path = "../../../relays/westend" } testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 7f05eefb4c20..6043a6aeda48 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -22,7 +22,8 @@ use frame_support::traits::OnInitialize; use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, impl_assets_helpers_for_parachain, impl_assets_helpers_for_system_parachain, - impl_xcm_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, + impl_foreign_assets_helpers_for_parachain, impl_xcm_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -56,4 +57,5 @@ impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend); impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); impl_assets_helpers_for_parachain!(AssetHubWestend); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v3::Location); impl_xcm_helpers_for_parachain!(AssetHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index d81ab8143ddb..450439f5ea30 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -17,8 +17,6 @@ use frame_support::parameter_types; use sp_core::{sr25519, storage::Storage}; -// Polkadot -use xcm::v3::Location; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, @@ -79,20 +77,9 
@@ pub fn genesis(para_id: u32) -> Storage { foreign_assets: penpal_runtime::ForeignAssetsConfig { assets: vec![ // Relay Native asset representation - ( - Location::try_from(RelayLocation::get()).expect("conversion works"), - PenpalAssetOwner::get(), - true, - ED, - ), + (RelayLocation::get(), PenpalAssetOwner::get(), true, ED), // Sufficient AssetHub asset representation - ( - Location::try_from(LocalReservableFromAssetHub::get()) - .expect("conversion works"), - PenpalAssetOwner::get(), - true, - ED, - ), + (LocalReservableFromAssetHub::get(), PenpalAssetOwner::get(), true, ED), ], ..Default::default() }, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 0b49c7a3e091..c268b014bfa3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -16,16 +16,20 @@ mod genesis; pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ - CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, XcmConfig, + CustomizableAssetFromSystemAssetHub, RelayNetworkId as PenpalRelayNetworkId, }; // Substrate use frame_support::traits::OnInitialize; +use sp_core::Encode; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, + impl_xcm_helpers_for_parachain, + impls::{NetworkId, Parachain}, + xcm_emulator::decl_test_parachains, }; // Penpal Parachain declaration @@ -34,6 +38,10 @@ decl_test_parachains! { genesis = genesis(PARA_ID_A), on_init = { penpal_runtime::AuraExt::on_initialize(1); + frame_support::assert_ok!(penpal_runtime::System::set_storage( + penpal_runtime::RuntimeOrigin::root(), + vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::Rococo.encode())], + )); }, runtime = penpal_runtime, core = { @@ -53,6 +61,10 @@ decl_test_parachains! { genesis = genesis(PARA_ID_B), on_init = { penpal_runtime::AuraExt::on_initialize(1); + frame_support::assert_ok!(penpal_runtime::System::set_storage( + penpal_runtime::RuntimeOrigin::root(), + vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::Westend.encode())], + )); }, runtime = penpal_runtime, core = { @@ -76,4 +88,8 @@ impl_accounts_helpers_for_parachain!(PenpalB); impl_assert_events_helpers_for_parachain!(PenpalA); impl_assert_events_helpers_for_parachain!(PenpalB); impl_assets_helpers_for_parachain!(PenpalA); +impl_foreign_assets_helpers_for_parachain!(PenpalA, xcm::latest::Location); impl_assets_helpers_for_parachain!(PenpalB); +impl_foreign_assets_helpers_for_parachain!(PenpalB, xcm::latest::Location); +impl_xcm_helpers_for_parachain!(PenpalA); +impl_xcm_helpers_for_parachain!(PenpalB); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index 379a29d697bc..7a3a936ec972 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -39,6 +39,8 @@ decl_test_relay_chains! 
{ Hrmp: rococo_runtime::Hrmp, Identity: rococo_runtime::Identity, IdentityMigrator: rococo_runtime::IdentityMigrator, + Treasury: rococo_runtime::Treasury, + AssetRate: rococo_runtime::AssetRate, } }, } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 618c3addc5d0..c8a2f097abe9 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -38,9 +38,7 @@ pub use polkadot_runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, }; pub use xcm::{ - prelude::{Location, OriginKind, Outcome, VersionedXcm, XcmVersion}, - v3, - v4::Error as XcmError, + prelude::{Location, OriginKind, Outcome, VersionedXcm, XcmError, XcmVersion}, DoubleEncoded, }; @@ -696,12 +694,12 @@ macro_rules! impl_assets_helpers_for_system_parachain { #[macro_export] macro_rules! impl_assets_helpers_for_parachain { - ( $chain:ident) => { + ($chain:ident) => { $crate::impls::paste::paste! { impl $chain { - /// Create foreign assets using sudo `ForeignAssets::force_create()` - pub fn force_create_foreign_asset( - id: $crate::impls::v3::Location, + /// Create assets using sudo `Assets::force_create()` + pub fn force_create_asset( + id: u32, owner: $crate::impls::AccountId, is_sufficient: bool, min_balance: u128, @@ -711,20 +709,20 @@ macro_rules! impl_assets_helpers_for_parachain { let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); ::execute_with(|| { $crate::impls::assert_ok!( - ]>::ForeignAssets::force_create( + ]>::Assets::force_create( sudo_origin, - id.clone(), + id.clone().into(), owner.clone().into(), is_sufficient, min_balance, ) ); - assert!(]>::ForeignAssets::asset_exists(id.clone())); + assert!(]>::Assets::asset_exists(id.clone())); type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::ForeignAssets( + RuntimeEvent::::Assets( $crate::impls::pallet_assets::Event::ForceCreated { asset_id, .. @@ -736,19 +734,19 @@ macro_rules! impl_assets_helpers_for_parachain { for (beneficiary, amount) in prefund_accounts.into_iter() { let signed_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); - Self::mint_foreign_asset(signed_origin, id.clone(), beneficiary, amount); + Self::mint_asset(signed_origin, id.clone(), beneficiary, amount); } } - /// Mint assets making use of the ForeignAssets pallet-assets instance - pub fn mint_foreign_asset( + /// Mint assets making use of the assets pallet + pub fn mint_asset( signed_origin: ::RuntimeOrigin, - id: $crate::impls::v3::Location, + id: u32, beneficiary: $crate::impls::AccountId, amount_to_mint: u128, ) { ::execute_with(|| { - $crate::impls::assert_ok!(]>::ForeignAssets::mint( + $crate::impls::assert_ok!(]>::Assets::mint( signed_origin, id.clone().into(), beneficiary.clone().into(), @@ -760,7 +758,7 @@ macro_rules! impl_assets_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::ForeignAssets( + RuntimeEvent::::Assets( $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } ) => { asset_id: *asset_id == id, @@ -771,9 +769,39 @@ macro_rules! 
impl_assets_helpers_for_parachain { ); }); } - /// Create assets using sudo `Assets::force_create()` - pub fn force_create_asset( - id: u32, + + /// Returns the encoded call for `create` from the assets pallet + pub fn create_asset_call( + asset_id: u32, + min_balance: $crate::impls::Balance, + admin: $crate::impls::AccountId, + ) -> $crate::impls::DoubleEncoded<()> { + use $crate::impls::{Chain, Encode}; + + ::RuntimeCall::Assets($crate::impls::pallet_assets::Call::< + ::Runtime, + $crate::impls::pallet_assets::Instance1, + >::create { + id: asset_id.into(), + min_balance, + admin: admin.into(), + }) + .encode() + .into() + } + } + } + }; +} + +#[macro_export] +macro_rules! impl_foreign_assets_helpers_for_parachain { + ($chain:ident, $asset_id_type:ty) => { + $crate::impls::paste::paste! { + impl $chain { + /// Create foreign assets using sudo `ForeignAssets::force_create()` + pub fn force_create_foreign_asset( + id: $asset_id_type, owner: $crate::impls::AccountId, is_sufficient: bool, min_balance: u128, @@ -783,20 +811,20 @@ macro_rules! impl_assets_helpers_for_parachain { let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); ::execute_with(|| { $crate::impls::assert_ok!( - ]>::Assets::force_create( + ]>::ForeignAssets::force_create( sudo_origin, - id.clone().into(), + id.clone(), owner.clone().into(), is_sufficient, min_balance, ) ); - assert!(]>::Assets::asset_exists(id.clone())); + assert!(]>::ForeignAssets::asset_exists(id.clone())); type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::Assets( + RuntimeEvent::::ForeignAssets( $crate::impls::pallet_assets::Event::ForceCreated { asset_id, .. @@ -808,19 +836,19 @@ macro_rules! impl_assets_helpers_for_parachain { for (beneficiary, amount) in prefund_accounts.into_iter() { let signed_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); - Self::mint_asset(signed_origin, id.clone(), beneficiary, amount); + Self::mint_foreign_asset(signed_origin, id.clone(), beneficiary, amount); } } - /// Mint assets making use of the assets pallet - pub fn mint_asset( + /// Mint assets making use of the ForeignAssets pallet-assets instance + pub fn mint_foreign_asset( signed_origin: ::RuntimeOrigin, - id: u32, + id: $asset_id_type, beneficiary: $crate::impls::AccountId, amount_to_mint: u128, ) { ::execute_with(|| { - $crate::impls::assert_ok!(]>::Assets::mint( + $crate::impls::assert_ok!(]>::ForeignAssets::mint( signed_origin, id.clone().into(), beneficiary.clone().into(), @@ -832,7 +860,7 @@ macro_rules! impl_assets_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::Assets( + RuntimeEvent::::ForeignAssets( $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } ) => { asset_id: *asset_id == id, @@ -844,29 +872,9 @@ macro_rules! 
impl_assets_helpers_for_parachain { }); } - /// Returns the encoded call for `create` from the assets pallet - pub fn create_asset_call( - asset_id: u32, - min_balance: $crate::impls::Balance, - admin: $crate::impls::AccountId, - ) -> $crate::impls::DoubleEncoded<()> { - use $crate::impls::{Chain, Encode}; - - ::RuntimeCall::Assets($crate::impls::pallet_assets::Call::< - ::Runtime, - $crate::impls::pallet_assets::Instance1, - >::create { - id: asset_id.into(), - min_balance, - admin: admin.into(), - }) - .encode() - .into() - } - /// Returns the encoded call for `create` from the foreign assets pallet pub fn create_foreign_asset_call( - asset_id: $crate::impls::v3::Location, + asset_id: $asset_id_type, min_balance: $crate::impls::Balance, admin: $crate::impls::AccountId, ) -> $crate::impls::DoubleEncoded<()> { diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs index ee8b038a364d..d87bc5aa9633 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs @@ -25,7 +25,7 @@ use asset_hub_rococo_emulated_chain::AssetHubRococo; use asset_hub_westend_emulated_chain::AssetHubWestend; use bridge_hub_rococo_emulated_chain::BridgeHubRococo; use bridge_hub_westend_emulated_chain::BridgeHubWestend; -use penpal_emulated_chain::PenpalA; +use penpal_emulated_chain::{PenpalA, PenpalB}; use rococo_emulated_chain::Rococo; use westend_emulated_chain::Westend; @@ -48,13 +48,13 @@ decl_test_networks! { PenpalA, ], bridge = RococoWestendMockBridge - }, pub struct WestendMockNet { relay_chain = Westend, parachains = vec![ AssetHubWestend, BridgeHubWestend, + PenpalB, ], bridge = WestendRococoMockBridge }, @@ -96,5 +96,6 @@ decl_test_sender_receiver_accounts_parameter_types! 
{ WestendRelay { sender: ALICE, receiver: BOB }, AssetHubWestendPara { sender: ALICE, receiver: BOB }, BridgeHubWestendPara { sender: ALICE, receiver: BOB }, - PenpalAPara { sender: ALICE, receiver: BOB } + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index c5a672234a0d..ddd6d2d04982 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -21,15 +21,20 @@ pallet-balances = { path = "../../../../../../../substrate/frame/balances", defa pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } parachains-common = { path = "../../../../../common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index a5a4914e21d8..322c6cf1f228 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -30,6 +30,7 @@ mod imports { prelude::{AccountId32 as AccountId32Junction, *}, v3, }; + pub use xcm_executor::traits::TransferType; // Cumulus pub use asset_test_utils::xcm_helpers; @@ -81,6 +82,7 @@ mod imports { pub type SystemParaToParaTest = Test; pub type ParaToSystemParaTest = Test; pub type ParaToParaThroughRelayTest = Test; + pub type ParaToParaThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs new file mode 100644 index 000000000000..6bdf89e6f277 --- /dev/null +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs @@ -0,0 +1,628 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::reserve_transfer::*; +use crate::{ + imports::*, + tests::teleport::do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt, +}; + +fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_a_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + let sov_penpal_b_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalB::para_id()), + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Withdrawn from sender parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_penpal_a_on_ah, + amount: *amount == t.args.amount, + }, + // Deposited to receiver parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Minted { who, .. } + ) => { + who: *who == sov_penpal_b_on_ah, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::LocalReserve), + bx!(fee.id.into()), + bx!(TransferType::LocalReserve), + t.args.weight_limit, + ) +} + +fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.id.into()), + bx!(TransferType::DestinationReserve), + t.args.weight_limit, + ) +} + +fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), + bx!(fee.id.into()), + bx!(TransferType::RemoteReserve(asset_hub_location.into())), + t.args.weight_limit, + ) +} + +fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::DestinationReserve), + t.args.weight_limit, + ) +} + +fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::LocalReserve), + t.args.weight_limit, + ) +} + +// =========================================================================== +// ======= Transfer - Native + Bridged Assets - AssetHub->Parachain ========== +// =========================================================================== +/// Transfers of native asset plus bridged asset from AssetHub to some Parachain +/// while paying fees using native asset. 
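The helpers above pick a `TransferType` for the transferred assets and, separately, for the fee asset. A minimal standalone summary of those combinations is sketched below (simplified stand-in enum and hypothetical direction labels, not the actual `xcm_executor::traits::TransferType`):

// Illustrative only: which transfer type each helper above passes for the assets
// and for the fees, keyed by a hypothetical direction label.
#[derive(Debug, PartialEq)]
enum Transfer {
    LocalReserve,
    DestinationReserve,
    RemoteReserve(&'static str),
    Teleport,
}

// Returns (assets transfer type, fees transfer type), mirroring the dispatchables above.
fn transfer_types(direction: &str) -> (Transfer, Transfer) {
    match direction {
        "ah_to_para" => (Transfer::LocalReserve, Transfer::LocalReserve),
        "para_to_ah" => (Transfer::DestinationReserve, Transfer::DestinationReserve),
        "para_to_para_through_ah" =>
            (Transfer::RemoteReserve("AssetHub"), Transfer::RemoteReserve("AssetHub")),
        "para_to_ah_teleport" => (Transfer::Teleport, Transfer::DestinationReserve),
        "ah_to_para_teleport" => (Transfer::Teleport, Transfer::LocalReserve),
        other => panic!("unknown direction: {other}"),
    }
}
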
+#[test] +fn transfer_foreign_assets_from_asset_hub_to_para() { + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sender = AssetHubRococoSender::get(); + let native_amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; + let native_asset_location = RelayLocation::get(); + let receiver = PenpalAReceiver::get(); + let assets_owner = PenpalAssetOwner::get(); + // Foreign asset used: bridged WND + let foreign_amount_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; + let wnd_at_rococo_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + + // Configure destination chain to trust AH as reserve of WND + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Westend)]).encode(), + )], + )); + }); + PenpalA::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubRococo::force_create_foreign_asset( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubRococo::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + wnd_at_rococo_parachains.clone().try_into().unwrap(), + sender.clone(), + foreign_amount_to_send * 2, + ); + + // Assets to send + let assets: Vec = vec![ + (Parent, native_amount_to_send).into(), + (wnd_at_rococo_parachains.clone(), foreign_amount_to_send).into(), + ]; + let fee_asset_id = AssetId(Parent.into()); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + native_amount_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = SystemParaToParaTest::new(test_args); + + // Query initial balances + let sender_balance_before = test.sender.balance; + let sender_wnds_before = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sender, + ) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location.clone(), &receiver) + }); + let receiver_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &receiver) + }); + + // Set assertions and dispatchables + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); + test.set_dispatchable::(ah_to_para_transfer_assets); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sender_wnds_after = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sender, + ) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location, &receiver) + }); + let receiver_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < 
sender_balance_before - native_amount_to_send); + // Sender's balance is reduced by foreign amount sent + assert_eq!(sender_wnds_after, sender_wnds_before - foreign_amount_to_send); + // Receiver's assets is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + native_amount_to_send); + // Receiver's balance is increased by foreign amount sent + assert_eq!(receiver_wnds_after, receiver_wnds_before + foreign_amount_to_send); +} + +/// Reserve Transfers of native asset from Parachain to System Parachain should work +// =========================================================================== +// ======= Transfer - Native + Bridged Assets - Parachain->AssetHub ========== +// =========================================================================== +/// Transfers of native asset plus bridged asset from some Parachain to AssetHub +/// while paying fees using native asset. +#[test] +fn transfer_foreign_assets_from_para_to_asset_hub() { + // Init values for Parachain + let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let sender = PenpalASender::get(); + let native_amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; + let native_asset_location = RelayLocation::get(); + let assets_owner = PenpalAssetOwner::get(); + + // Foreign asset used: bridged WND + let foreign_amount_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; + let wnd_at_rococo_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + + // Configure destination chain to trust AH as reserve of WND + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Westend)]).encode(), + )], + )); + }); + PenpalA::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubRococo::force_create_foreign_asset( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + + // fund Parachain's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + native_asset_location.clone(), + sender.clone(), + native_amount_to_send * 2, + ); + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + wnd_at_rococo_parachains.clone(), + sender.clone(), + foreign_amount_to_send * 2, + ); + + // Init values for System Parachain + let receiver = AssetHubRococoReceiver::get(); + let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); + + // fund Parachain's SA on AssetHub with the assets held in reserve + AssetHubRococo::fund_accounts(vec![( + sov_penpal_on_ahr.clone().into(), + native_amount_to_send * 2, + )]); + AssetHubRococo::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + wnd_at_rococo_parachains.clone().try_into().unwrap(), + sov_penpal_on_ahr, + foreign_amount_to_send * 2, + ); + + // Assets to send + let assets: Vec = vec![ + (Parent, native_amount_to_send).into(), + (wnd_at_rococo_parachains.clone(), 
foreign_amount_to_send).into(), + ]; + let fee_asset_id = AssetId(Parent.into()); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + native_amount_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = ParaToSystemParaTest::new(test_args); + + // Query initial balances + let sender_native_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location.clone(), &sender) + }); + let sender_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &sender) + }); + let receiver_native_before = test.receiver.balance; + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &receiver, + ) + }); + + // Set assertions and dispatchables + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_ah_transfer_assets); + test.assert(); + + // Query final balances + let sender_native_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location, &sender) + }); + let sender_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &sender) + }); + let receiver_native_after = test.receiver.balance; + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.try_into().unwrap(), + &receiver, + ) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_native_after < sender_native_before - native_amount_to_send); + // Sender's balance is reduced by foreign amount sent + assert_eq!(sender_wnds_after, sender_wnds_before - foreign_amount_to_send); + // Receiver's balance is increased + assert!(receiver_native_after > receiver_native_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_native_after < receiver_native_before + native_amount_to_send); + // Receiver's balance is increased by foreign amount sent + assert_eq!(receiver_wnds_after, receiver_wnds_before + foreign_amount_to_send); +} + +// ============================================================================== +// ===== Transfer - Native + Bridged Assets - Parachain->AssetHub->Parachain ==== +// ============================================================================== +/// Transfers of native asset plus bridged asset from Parachain to Parachain +/// (through AssetHub reserve) with fees paid using native asset. 
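The balance assertions in these tests follow one pattern: the fee-paying (native) asset moves by an inexact amount, because delivery and execution fees are deducted from it, while the other asset moves by exactly the requested amount. A minimal standalone sketch of that check, with illustrative helper names:

// Illustrative only: fee-paying asset vs. exact-amount asset checks.
fn check_fee_paying_asset(
    sender_before: u128,
    sender_after: u128,
    receiver_before: u128,
    receiver_after: u128,
    amount: u128,
) {
    // Sender pays `amount` plus unknown, non-zero delivery fees.
    assert!(sender_after < sender_before - amount);
    // Receiver gets `amount` minus unknown, non-zero execution fees.
    assert!(receiver_after > receiver_before);
    assert!(receiver_after < receiver_before + amount);
}

fn check_exact_amount_asset(
    sender_before: u128,
    sender_after: u128,
    receiver_before: u128,
    receiver_after: u128,
    amount: u128,
) {
    assert_eq!(sender_after, sender_before - amount);
    assert_eq!(receiver_after, receiver_before + amount);
}
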
+#[test] +fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { + // Init values for Parachain Origin + let destination = PenpalA::sibling_location_of(PenpalB::para_id()); + let sender = PenpalASender::get(); + let roc_to_send: Balance = ROCOCO_ED * 10000; + let assets_owner = PenpalAssetOwner::get(); + let roc_location = RelayLocation::get(); + let sender_as_seen_by_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_of_sender_on_ah = AssetHubRococo::sovereign_account_id_of(sender_as_seen_by_ah); + let receiver_as_seen_by_ah = AssetHubRococo::sibling_location_of(PenpalB::para_id()); + let sov_of_receiver_on_ah = AssetHubRococo::sovereign_account_id_of(receiver_as_seen_by_ah); + let wnd_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; + + // Configure destination chain to trust AH as reserve of WND + PenpalB::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Westend)]).encode(), + )], + )); + }); + + // Register WND as foreign asset and transfer it around the Rococo ecosystem + let wnd_at_rococo_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + AssetHubRococo::force_create_foreign_asset( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + PenpalA::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + PenpalB::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + + // fund Parachain's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + roc_location.clone(), + sender.clone(), + roc_to_send * 2, + ); + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + wnd_at_rococo_parachains.clone(), + sender.clone(), + wnd_to_send * 2, + ); + // fund the Parachain Origin's SA on Asset Hub with the assets held in reserve + AssetHubRococo::fund_accounts(vec![(sov_of_sender_on_ah.clone().into(), roc_to_send * 2)]); + AssetHubRococo::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + wnd_at_rococo_parachains.clone().try_into().unwrap(), + sov_of_sender_on_ah.clone(), + wnd_to_send * 2, + ); + + // Init values for Parachain Destination + let receiver = PenpalBReceiver::get(); + + // Assets to send + let assets: Vec = vec![ + (roc_location.clone(), roc_to_send).into(), + (wnd_at_rococo_parachains.clone(), wnd_to_send).into(), + ]; + let fee_asset_id: AssetId = roc_location.clone().into(); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination, + receiver.clone(), + roc_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = ParaToParaThroughAHTest::new(test_args); + + // Query initial balances + let sender_rocs_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_location.clone(), &sender) + }); + let sender_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &sender) + }); + let rocs_in_sender_reserve_on_ahr_before = + 
::account_data_of(sov_of_sender_on_ah.clone()).free; + let wnds_in_sender_reserve_on_ahr_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sov_of_sender_on_ah, + ) + }); + let rocs_in_receiver_reserve_on_ahr_before = + ::account_data_of(sov_of_receiver_on_ah.clone()).free; + let wnds_in_receiver_reserve_on_ahr_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sov_of_receiver_on_ah, + ) + }); + let receiver_rocs_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_location.clone(), &receiver) + }); + let receiver_wnds_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &receiver) + }); + + // Set assertions and dispatchables + test.set_assertion::(para_to_para_through_hop_sender_assertions); + test.set_assertion::(para_to_para_assethub_hop_assertions); + test.set_assertion::(para_to_para_through_hop_receiver_assertions); + test.set_dispatchable::(para_to_para_transfer_assets_through_ah); + test.assert(); + + // Query final balances + let sender_rocs_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_location.clone(), &sender) + }); + let sender_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone(), &sender) + }); + let wnds_in_sender_reserve_on_ahr_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sov_of_sender_on_ah, + ) + }); + let rocs_in_sender_reserve_on_ahr_after = + ::account_data_of(sov_of_sender_on_ah).free; + let wnds_in_receiver_reserve_on_ahr_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone().try_into().unwrap(), + &sov_of_receiver_on_ah, + ) + }); + let rocs_in_receiver_reserve_on_ahr_after = + ::account_data_of(sov_of_receiver_on_ah).free; + let receiver_rocs_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_location, &receiver) + }); + let receiver_wnds_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_rocs_after < sender_rocs_before - roc_to_send); + assert_eq!(sender_wnds_after, sender_wnds_before - wnd_to_send); + // Sovereign accounts on reserve are changed accordingly + assert_eq!( + rocs_in_sender_reserve_on_ahr_after, + rocs_in_sender_reserve_on_ahr_before - roc_to_send + ); + assert_eq!( + wnds_in_sender_reserve_on_ahr_after, + wnds_in_sender_reserve_on_ahr_before - wnd_to_send + ); + assert!(rocs_in_receiver_reserve_on_ahr_after > rocs_in_receiver_reserve_on_ahr_before); + assert_eq!( + wnds_in_receiver_reserve_on_ahr_after, + wnds_in_receiver_reserve_on_ahr_before + wnd_to_send + ); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + assert_eq!(receiver_wnds_after, receiver_wnds_before + wnd_to_send); +} + +// ============================================================================================== +// ==== Bidirectional Transfer - Native + Teleportable Foreign Assets - Parachain<->AssetHub ==== +// 
============================================================================================== +/// Transfers of native asset plus teleportable foreign asset from Parachain to AssetHub and back +/// with fees paid using native asset. +#[test] +fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explicit_transfer_types() { + do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_asset_hub_teleport_foreign_assets, + asset_hub_to_para_teleport_foreign_assets, + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs index b3841af0e6c3..346af3082384 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -13,8 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod foreign_assets_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; mod swap; mod teleport; +mod treasury; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index a0738839087a..5aef70f5cbfc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -47,7 +47,7 @@ fn para_to_relay_sender_assertions(t: ParaToRelayTest) { RuntimeEvent::ForeignAssets( pallet_assets::Event::Burned { asset_id, owner, balance, .. 
} ) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, @@ -55,70 +55,92 @@ fn para_to_relay_sender_assertions(t: ParaToRelayTest) { ); } -fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { +pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - - AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 864_610_000, - 8_799, - ))); - + AssetHubRococo::assert_xcm_pallet_attempted_complete(None); + + let sov_acc_of_dest = AssetHubRococo::sovereign_account_id_of(t.args.dest.clone()); + for (idx, asset) in t.args.assets.into_inner().into_iter().enumerate() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + if idx == t.args.fee_asset_item as usize { + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount of native asset is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == sov_acc_of_dest, + amount: *amount == asset_amount, + }, + ] + ); + } else { + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount of foreign asset is transferred to Parachain's Sovereign account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Transferred { asset_id, from, to, amount }, + ) => { + asset_id: *asset_id == expected_id, + from: *from == t.sender.account_id, + to: *to == sov_acc_of_dest, + amount: *amount == asset_amount, + }, + ] + ); + } + } assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from, to, amount } - ) => { - from: *from == t.sender.account_id, - to: *to == AssetHubRococo::sovereign_account_id_of( - t.args.dest.clone() - ), - amount: *amount == t.args.amount, - }, // Transport fees are paid - RuntimeEvent::PolkadotXcm( - pallet_xcm::Event::FeesPaid { .. } - ) => {}, + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); AssetHubRococo::assert_xcm_pallet_sent(); } -fn system_para_to_para_receiver_assertions(t: SystemParaToParaTest) { +pub fn system_para_to_para_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); PenpalA::assert_xcmp_queue_success(None); - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == system_para_native_asset_location, - owner: *owner == t.receiver.account_id, - }, - ] - ); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0.try_into().unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } } -fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { +pub fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); - assert_expected_events!( - PenpalA, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::ForeignAssets( - pallet_assets::Event::Burned { asset_id, owner, balance, .. } - ) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), - owner: *owner == t.sender.account_id, - balance: *balance == t.args.amount, - }, - ] - ); + PenpalA::assert_xcm_pallet_attempted_complete(None); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0; + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance } + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.sender.account_id, + balance: *balance == asset_amount, + }, + ] + ); + } } fn para_to_relay_receiver_assertions(t: ParaToRelayTest) { @@ -150,25 +172,57 @@ fn para_to_relay_receiver_assertions(t: ParaToRelayTest) { ); } -fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { +pub fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( - AssetHubRococo::sibling_location_of(PenpalA::para_id()), - ); - AssetHubRococo::assert_xcmp_queue_success(None); + let sov_acc_of_penpal = AssetHubRococo::sovereign_account_id_of(t.args.dest.clone()); + for (idx, asset) in t.args.assets.into_inner().into_iter().enumerate() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + if idx == t.args.fee_asset_item as usize { + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount of native is withdrawn from Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_acc_of_penpal.clone().into(), + amount: *amount == asset_amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { + who: *who == t.receiver.account_id, + }, + ] + ); + } else { + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount of foreign asset is transferred from Parachain's Sovereign account + // to Receiver's account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == sov_acc_of_penpal, + balance: *balance == asset_amount, + }, + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Issued { asset_id, owner, amount }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + amount: *amount == asset_amount, + }, + ] + ); + } + } assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is withdrawn from Parachain's Sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahr.clone().into(), - amount: *amount == t.args.amount, - }, - RuntimeEvent::Balances(pallet_balances::Event::Minted { .. }) => {}, RuntimeEvent::MessageQueue( pallet_message_queue::Event::Processed { success: true, .. } ) => {}, @@ -212,10 +266,8 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let reservable_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let reservable_asset_location = PenpalLocalReservableFromAssetHub::get(); PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8799))); assert_expected_events!( PenpalA, @@ -245,14 +297,13 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { fn system_para_to_para_assets_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_asset_location = PenpalLocalReservableFromAssetHub::get(); PenpalA::assert_xcmp_queue_success(None); assert_expected_events!( PenpalA, vec![ RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.receiver.account_id, }, RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { @@ -304,7 +355,7 @@ fn relay_to_para_assets_receiver_assertions(t: RelayToParaTest) { PenpalA, vec![ RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.receiver.account_id, }, RuntimeEvent::MessageQueue( @@ -314,29 +365,27 @@ fn relay_to_para_assets_receiver_assertions(t: RelayToParaTest) { ); } -fn para_to_para_through_relay_sender_assertions(t: ParaToParaThroughRelayTest) { +pub fn para_to_para_through_hop_sender_assertions(t: Test) { type RuntimeEvent = ::RuntimeEvent; - let relay_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - PenpalA::assert_xcm_pallet_attempted_complete(None); - // XCM sent to relay reserve - PenpalA::assert_parachain_system_ump_sent(); - - assert_expected_events!( - PenpalA, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::ForeignAssets( - pallet_assets::Event::Burned { asset_id, owner, balance }, - ) => { - asset_id: *asset_id == relay_asset_location, - owner: *owner == t.sender.account_id, - balance: *balance == t.args.amount, - }, - ] - ); + for asset in t.args.assets.into_inner() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.sender.account_id, + balance: *balance == amount, + }, + ] + ); + } } fn para_to_para_relay_hop_assertions(t: ParaToParaThroughRelayTest) { @@ -369,22 +418,22 @@ fn para_to_para_relay_hop_assertions(t: ParaToParaThroughRelayTest) { ); } -fn para_to_para_through_relay_receiver_assertions(t: ParaToParaThroughRelayTest) { +pub fn para_to_para_through_hop_receiver_assertions(t: Test) { type RuntimeEvent = ::RuntimeEvent; - let relay_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); PenpalB::assert_xcmp_queue_success(None); - - assert_expected_events!( - PenpalB, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == relay_asset_location, - owner: *owner == t.receiver.account_id, - }, - ] - ); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0.try_into().unwrap(); + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } } fn relay_to_para_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { @@ -526,8 +575,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let amount_to_send: Balance = ROCOCO_ED * 1000; // Init values fot Parachain - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); // Init Test @@ -542,7 +590,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender_balance_before = test.sender.balance; let receiver_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &receiver) + >::balance(relay_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables @@ -555,7 +603,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &receiver) + >::balance(relay_native_asset_location, &receiver) }); // Sender's balance is reduced by amount sent plus delivery fees @@ -577,13 +625,12 @@ fn reserve_transfer_native_asset_from_para_to_relay() { let amount_to_send: Balance = ROCOCO_ED * 1000; let assets: Assets = (Parent, amount_to_send).into(); let asset_owner = PenpalAssetOwner::get(); - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - relay_native_asset_location, + relay_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -614,7 +661,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; @@ -627,7 +674,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &sender) + >::balance(relay_native_asset_location, &sender) }); let receiver_balance_after = test.receiver.balance; @@ -654,8 +701,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let assets: Assets = (Parent, amount_to_send).into(); // Init values for Parachain - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); // Init Test @@ -677,7 +723,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let sender_balance_before = test.sender.balance; let receiver_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.into(), &receiver) + >::balance(system_para_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables @@ -711,14 +757,13 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { let 
sender = PenpalASender::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; let assets: Assets = (Parent, amount_to_send).into(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let asset_owner = PenpalAssetOwner::get(); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - system_para_native_asset_location, + system_para_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -749,7 +794,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_para_native_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; @@ -776,9 +821,9 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -// ========================================================================= -// ======= Reserve Transfers - Non-system Asset - AssetHub<>Parachain ====== -// ========================================================================= +// ================================================================================== +// ======= Reserve Transfers - Native + Non-system Asset - AssetHub<>Parachain ====== +// ================================================================================== /// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should /// work #[test] @@ -817,10 +862,8 @@ fn reserve_transfer_assets_from_system_para_to_para() { // Init values for Parachain let receiver = PenpalAReceiver::get(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let system_para_foreign_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); // Init Test let para_test_args = TestContext { @@ -845,11 +888,14 @@ fn reserve_transfer_assets_from_system_para_to_para() { }); let receiver_system_native_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &receiver) + >::balance(system_para_native_asset_location.clone(), &receiver) }); let receiver_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &receiver) + >::balance( + system_para_foreign_asset_location.clone(), + &receiver, + ) }); // Set assertions and dispatchables @@ -866,7 +912,7 @@ fn reserve_transfer_assets_from_system_para_to_para() { }); let receiver_system_native_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &receiver) + >::balance(system_para_native_asset_location.clone(), &receiver) }); let receiver_foreign_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -904,14 +950,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { let asset_amount_to_send = ASSET_HUB_ROCOCO_ED * 10000; let penpal_asset_owner = PenpalAssetOwner::get(); let 
penpal_asset_owner_signer = ::RuntimeOrigin::signed(penpal_asset_owner); - let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); - let asset_location_on_penpal_latest: Location = asset_location_on_penpal.try_into().unwrap(); - let system_asset_location_on_penpal = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let asset_location_on_penpal = PenpalLocalReservableFromAssetHub::get(); + let system_asset_location_on_penpal = RelayLocation::get(); let assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (asset_location_on_penpal_latest, asset_amount_to_send).into(), + (asset_location_on_penpal.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = assets @@ -938,10 +981,8 @@ fn reserve_transfer_assets_from_para_to_system_para() { let receiver = AssetHubRococoReceiver::get(); let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let system_para_foreign_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); let ah_asset_owner = AssetHubRococoAssetOwner::get(); let ah_asset_owner_signer = ::RuntimeOrigin::signed(ah_asset_owner); @@ -976,11 +1017,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query initial balances let sender_system_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_para_native_asset_location.clone(), &sender) }); let sender_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &sender) + >::balance(system_para_foreign_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; let receiver_assets_before = AssetHubRococo::execute_with(|| { @@ -997,7 +1038,7 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query final balances let sender_system_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_para_native_asset_location.clone(), &sender) }); let sender_foreign_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1029,22 +1070,21 @@ fn reserve_transfer_assets_from_para_to_system_para() { /// Reserve Transfers of native asset from Parachain to Parachain (through Relay reserve) should /// work #[test] -fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { +fn reserve_transfer_native_asset_from_para_to_para_through_relay() { // Init values for Parachain Origin let destination = PenpalA::sibling_location_of(PenpalB::para_id()); let sender = PenpalASender::get(); let amount_to_send: Balance = ROCOCO_ED * 10000; let asset_owner = PenpalAssetOwner::get(); let assets = (Parent, amount_to_send).into(); - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); let sender_as_seen_by_relay = 
Rococo::child_location_of(PenpalA::para_id()); let sov_of_sender_on_relay = Rococo::sovereign_account_id_of(sender_as_seen_by_relay); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - relay_native_asset_location, + relay_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -1066,24 +1106,24 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_assets_before = PenpalB::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &receiver) + >::balance(relay_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables - test.set_assertion::(para_to_para_through_relay_sender_assertions); + test.set_assertion::(para_to_para_through_hop_sender_assertions); test.set_assertion::(para_to_para_relay_hop_assertions); - test.set_assertion::(para_to_para_through_relay_receiver_assertions); + test.set_assertion::(para_to_para_through_hop_receiver_assertions); test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_assets_after = PenpalB::execute_with(|| { type ForeignAssets = ::ForeignAssets; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index e13300b7c114..919e0080ba62 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -17,7 +17,10 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocationV3::get()); + let asset_native = Box::new( + v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) + .expect("conversion works"), + ); let asset_one = Box::new(v3::Location::new( 0, [ @@ -112,10 +115,9 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - let asset_native = - Box::new(v3::Location::try_from(RelayLocation::get()).expect("conversion works")); + let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion works"); + v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).unwrap(); let foreign_asset_at_asset_hub_rococo = v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) @@ -228,11 +230,9 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = Box::new(asset_hub_rococo_runtime::xcm_config::TokenLocationV3::get()); - let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocationV3::get(); - asset_one - 
.append_with(v3::Junction::GeneralIndex(ASSET_ID.into())) - .expect("pool assets"); + let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); + let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocation::get(); + asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubRococo::execute_with(|| { let pool_owner_account_id = asset_hub_rococo_runtime::AssetConversionOrigin::get(); @@ -255,8 +255,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - asset_native, - Box::new(asset_one), + Box::new(v3::Location::try_from(asset_native).expect("conversion works")), + Box::new(v3::Location::try_from(asset_one).expect("conversion works")), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -265,7 +265,9 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocationV3::get(); + let asset_native = + v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) + .expect("conversion works"); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 1cbb7fb8c193..f74378d7631a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -110,8 +110,7 @@ fn para_dest_assertions(t: RelayToSystemParaTest) { fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let expected_asset_id = t.args.asset_id.unwrap(); let (_, expected_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); @@ -204,8 +203,7 @@ fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { let (_, expected_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); let checking_account = ::PolkadotXcm::check_account(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); PenpalA::assert_xcmp_queue_success(None); @@ -414,29 +412,28 @@ fn teleport_to_other_system_parachains_works() { ); } -/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work -/// (using native reserve-based transfer for fees) -#[test] -fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets while paying +/// fees using (reserve transferred) native asset. 
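+/// Parameterised over the dispatchable used for each direction: `para_to_ah_dispatchable` drives
+/// the Penpal -> Asset Hub leg and `ah_to_para_dispatchable` the return leg, so the same scenario
+/// can be exercised with different transfer extrinsics.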
+pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_ah_dispatchable: fn(ParaToSystemParaTest) -> DispatchResult, + ah_to_para_dispatchable: fn(SystemParaToParaTest) -> DispatchResult, +) { // Init values for Parachain let fee_amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; - let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion works"); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); let asset_id_on_penpal = match asset_location_on_penpal.last() { - Some(v3::Junction::GeneralIndex(id)) => *id as u32, + Some(Junction::GeneralIndex(id)) => *id as u32, _ => unreachable!(), }; let asset_amount_to_send = ASSET_HUB_ROCOCO_ED * 1000; let asset_owner = PenpalAssetOwner::get(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let sender = PenpalASender::get(); let penpal_check_account = ::PolkadotXcm::check_account(); let ah_as_seen_by_penpal = PenpalA::sibling_location_of(AssetHubRococo::para_id()); - let asset_location_on_penpal_latest: Location = asset_location_on_penpal.try_into().unwrap(); let penpal_assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (asset_location_on_penpal_latest, asset_amount_to_send).into(), + (asset_location_on_penpal.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = penpal_assets @@ -448,7 +445,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner.clone()), - system_para_native_asset_location, + system_para_native_asset_location.clone(), sender.clone(), fee_amount_to_send * 2, ); @@ -472,7 +469,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // Init values for System Parachain let foreign_asset_at_asset_hub_rococo = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); let penpal_to_ah_beneficiary_id = AssetHubRococoReceiver::get(); @@ -494,7 +491,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let penpal_sender_balance_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalASender::get(), ) }); @@ -508,20 +505,20 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_receiver_assets_before = AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.clone().try_into().unwrap(), &AssetHubRococoReceiver::get(), ) }); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_sender_assertions); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_receiver_assertions); - penpal_to_ah.set_dispatchable::(para_to_system_para_transfer_assets); + penpal_to_ah.set_dispatchable::(para_to_ah_dispatchable); penpal_to_ah.assert(); let penpal_sender_balance_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalASender::get(), ) }); @@ -535,7 +532,7 @@ fn 
bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_receiver_assets_after = AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.clone().try_into().unwrap(), &AssetHubRococoReceiver::get(), ) }); @@ -563,19 +560,17 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { type ForeignAssets = ::ForeignAssets; assert_ok!(ForeignAssets::transfer( ::RuntimeOrigin::signed(AssetHubRococoReceiver::get()), - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.clone().try_into().unwrap(), AssetHubRococoSender::get().into(), asset_amount_to_send, )); }); - let foreign_asset_at_asset_hub_rococo_latest: Location = - foreign_asset_at_asset_hub_rococo.try_into().unwrap(); let ah_to_penpal_beneficiary_id = PenpalAReceiver::get(); let penpal_as_seen_by_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let ah_assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (foreign_asset_at_asset_hub_rococo_latest, asset_amount_to_send).into(), + (foreign_asset_at_asset_hub_rococo.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = ah_assets @@ -603,7 +598,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let penpal_receiver_balance_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalAReceiver::get(), ) }); @@ -611,7 +606,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_sender_assets_before = AssetHubRococo::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.clone().try_into().unwrap(), &AssetHubRococoSender::get(), ) }); @@ -622,7 +617,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_sender_assertions); ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_receiver_assertions); - ah_to_penpal.set_dispatchable::(system_para_to_para_transfer_assets); + ah_to_penpal.set_dispatchable::(ah_to_para_dispatchable); ah_to_penpal.assert(); let ah_sender_balance_after = ah_to_penpal.sender.balance; @@ -637,7 +632,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_sender_assets_after = AssetHubRococo::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.try_into().unwrap(), &AssetHubRococoSender::get(), ) }); @@ -660,3 +655,13 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // Receiver's balance is increased by exact amount assert_eq!(penpal_receiver_assets_after, penpal_receiver_assets_before + asset_amount_to_send); } + +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work +/// (using native reserve-based transfer for fees) +#[test] +fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { + do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_system_para_transfer_assets, + system_para_to_para_transfer_assets, + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs new file mode 
100644
index 000000000000..01bf40ae8fdf
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs
@@ -0,0 +1,270 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::imports::*;
+use emulated_integration_tests_common::accounts::{ALICE, BOB};
+use frame_support::{
+    dispatch::RawOrigin,
+    sp_runtime::traits::Dispatchable,
+    traits::{
+        fungible::Inspect,
+        fungibles::{Create, Inspect as FungiblesInspect, Mutate},
+    },
+};
+use parachains_common::AccountId;
+use polkadot_runtime_common::impls::VersionedLocatableAsset;
+use rococo_runtime::OriginCaller;
+use rococo_runtime_constants::currency::GRAND;
+use xcm_executor::traits::ConvertLocation;
+
+// Fund Treasury account on Asset Hub from Treasury account on Relay Chain with ROCs.
+#[test]
+fn spend_roc_on_asset_hub() {
+    // initial treasury balance on Asset Hub in ROCs.
+    let treasury_balance = 9_000 * GRAND;
+    // the balance to spend on Asset Hub.
+    let treasury_spend_balance = 1_000 * GRAND;
+
+    let init_alice_balance = AssetHubRococo::execute_with(|| {
+        <::Balances as Inspect<_>>::balance(
+            &AssetHubRococo::account_id_of(ALICE),
+        )
+    });
+
+    Rococo::execute_with(|| {
+        type RuntimeEvent = ::RuntimeEvent;
+        type RuntimeCall = ::RuntimeCall;
+        type Runtime = ::Runtime;
+        type Balances = ::Balances;
+        type Treasury = ::Treasury;
+
+        // Fund Treasury account on Asset Hub with ROCs.
+
+        let root = ::RuntimeOrigin::root();
+        let treasury_account = Treasury::account_id();
+
+        // Mint assets to Treasury account on Relay Chain.
+        assert_ok!(Balances::force_set_balance(
+            root.clone(),
+            treasury_account.clone().into(),
+            treasury_balance * 2,
+        ));
+
+        let native_asset = Location::here();
+        let asset_hub_location: Location = [Parachain(1000)].into();
+        let treasury_location: Location = (Parent, PalletInstance(18)).into();
+
+        let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as {
+            as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))),
+            call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets {
+                dest: bx!(VersionedLocation::V4(asset_hub_location.clone())),
+                beneficiary: bx!(VersionedLocation::V4(treasury_location)),
+                assets: bx!(VersionedAssets::V4(
+                    Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into()
+                )),
+                fee_asset_item: 0,
+            })),
+        });
+
+        // Dispatched from Root to `dispatch_as` `Signed(treasury_account)`.
+        assert_ok!(teleport_call.dispatch(root));
+
+        assert_expected_events!(
+            Rococo,
+            vec![
+                RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+            ]
+        );
+    });
+
+    Rococo::execute_with(|| {
+        type RuntimeEvent = ::RuntimeEvent;
+        type RuntimeCall = ::RuntimeCall;
+        type RuntimeOrigin = ::RuntimeOrigin;
+        type Runtime = ::Runtime;
+        type Treasury = ::Treasury;
+
+        // Fund Alice's account from the Rococo Treasury account on Asset Hub.
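+        // The spend is approved with the custom `Treasurer` origin, targets the relay-chain
+        // native asset held on Asset Hub (parachain 1000), and names Alice as beneficiary;
+        // any signed origin (Bob below) can then trigger the payout.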
+
+        let treasury_origin: RuntimeOrigin =
+            rococo_runtime::governance::pallet_custom_origins::Origin::Treasurer.into();
+
+        let alice_location: Location =
+            [Junction::AccountId32 { network: None, id: Rococo::account_id_of(ALICE).into() }]
+                .into();
+        let asset_hub_location: Location = [Parachain(1000)].into();
+        let native_asset = Location::parent();
+
+        let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend {
+            asset_kind: bx!(VersionedLocatableAsset::V4 {
+                location: asset_hub_location.clone(),
+                asset_id: native_asset.into(),
+            }),
+            amount: treasury_spend_balance,
+            beneficiary: bx!(VersionedLocation::V4(alice_location)),
+            valid_from: None,
+        });
+
+        assert_ok!(treasury_spend_call.dispatch(treasury_origin));
+
+        // Claim the spend.
+
+        let bob_signed = RuntimeOrigin::signed(Rococo::account_id_of(BOB));
+        assert_ok!(Treasury::payout(bob_signed.clone(), 0));
+
+        assert_expected_events!(
+            Rococo,
+            vec![
+                RuntimeEvent::Treasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {},
+                RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {},
+            ]
+        );
+    });
+
+    AssetHubRococo::execute_with(|| {
+        type RuntimeEvent = ::RuntimeEvent;
+        type Balances = ::Balances;
+
+        // Ensure that the funds were deposited into Alice's account.
+
+        let alice_account = AssetHubRococo::account_id_of(ALICE);
+        assert_eq!(
+            >::balance(&alice_account),
+            treasury_spend_balance + init_alice_balance
+        );
+
+        // Assert events triggered by the XCM pay program:
+        // 1. treasury asset transferred to spend beneficiary;
+        // 2. response to Relay Chain Treasury pallet instance sent back;
+        // 3. XCM program completed;
+        assert_expected_events!(
+            AssetHubRococo,
+            vec![
+                RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {},
+                RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {},
+                RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {},
+            ]
+        );
+    });
+}
+
+#[test]
+fn create_and_claim_treasury_spend_in_usdt() {
+    const ASSET_ID: u32 = 1984;
+    const SPEND_AMOUNT: u128 = 1_000_000;
+    // treasury location from a sibling parachain.
+    let treasury_location: Location = Location::new(1, PalletInstance(18));
+    // treasury account on a sibling parachain.
+    let treasury_account =
+        asset_hub_rococo_runtime::xcm_config::LocationToAccountId::convert_location(
+            &treasury_location,
+        )
+        .unwrap();
+    let asset_hub_location =
+        v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into()));
+    let root = ::RuntimeOrigin::root();
+    // asset kind to be spent from the treasury.
+    let asset_kind = VersionedLocatableAsset::V3 {
+        location: asset_hub_location,
+        asset_id: v3::AssetId::Concrete(
+            (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(ASSET_ID.into())).into(),
+        ),
+    };
+    // treasury spend beneficiary.
+    let alice: AccountId = Rococo::account_id_of(ALICE);
+    let bob: AccountId = Rococo::account_id_of(BOB);
+    let bob_signed = ::RuntimeOrigin::signed(bob.clone());
+
+    AssetHubRococo::execute_with(|| {
+        type Assets = ::Assets;
+
+        // create an asset class and mint some assets to the treasury account.
+        assert_ok!(>::create(
+            ASSET_ID,
+            treasury_account.clone(),
+            true,
+            SPEND_AMOUNT / 2
+        ));
+        assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4));
+        // beneficiary has zero balance.
+ assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Treasury = ::Treasury; + type AssetRate = ::AssetRate; + + // create a conversion rate from `asset_kind` to the native currency. + assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + + // create and approve a treasury spend. + assert_ok!(Treasury::spend( + root, + Box::new(asset_kind), + SPEND_AMOUNT, + Box::new(Location::new(0, Into::<[u8; 32]>::into(alice.clone())).into()), + None, + )); + // claim the spend. + assert_ok!(Treasury::payout(bob_signed.clone(), 0)); + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Assets = ::Assets; + + // assert events triggered by xcm pay program + // 1. treasury asset transferred to spend beneficiary + // 2. response to Relay Chain treasury pallet instance sent back + // 3. XCM program completed + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { + id: id == &ASSET_ID, + from: from == &treasury_account, + to: to == &alice, + amount: amount == &SPEND_AMOUNT, + }, + RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + // beneficiary received the assets from the treasury. + assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Treasury = ::Treasury; + + // check the payment status to ensure the response from the AssetHub was received. + assert_ok!(Treasury::check_status(bob_signed, 0)); + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::SpendProcessed { .. }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index c9f5fe0647e1..e687251c14f9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -30,6 +30,7 @@ mod imports { prelude::{AccountId32 as AccountId32Junction, *}, v3, }; + pub use xcm_executor::traits::TransferType; // Cumulus pub use asset_test_utils::xcm_helpers; @@ -85,6 +86,7 @@ mod imports { pub type SystemParaToParaTest = Test; pub type ParaToSystemParaTest = Test; pub type ParaToParaThroughRelayTest = Test; + pub type ParaToParaThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs new file mode 100644 index 000000000000..8cfda37c84c9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs @@ -0,0 +1,629 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::reserve_transfer::*; +use crate::{ + imports::*, + tests::teleport::do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt, +}; + +fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_a_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + let sov_penpal_b_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + + assert_expected_events!( + AssetHubWestend, + vec![ + // Withdrawn from sender parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_penpal_a_on_ah, + amount: *amount == t.args.amount, + }, + // Deposited to receiver parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Minted { who, .. } + ) => { + who: *who == sov_penpal_b_on_ah, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::LocalReserve), + bx!(fee.id.into()), + bx!(TransferType::LocalReserve), + t.args.weight_limit, + ) +} + +fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.id.into()), + bx!(TransferType::DestinationReserve), + t.args.weight_limit, + ) +} + +fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), + bx!(fee.id.into()), + bx!(TransferType::RemoteReserve(asset_hub_location.into())), + t.args.weight_limit, + ) +} + +fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + 
bx!(TransferType::DestinationReserve), + t.args.weight_limit, + ) +} + +fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + ::PolkadotXcm::transfer_assets_using_type( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::LocalReserve), + t.args.weight_limit, + ) +} + +// =========================================================================== +// ======= Transfer - Native + Bridged Assets - AssetHub->Parachain ========== +// =========================================================================== +/// Transfers of native asset plus bridged asset from AssetHub to some Parachain +/// while paying fees using native asset. +#[test] +fn transfer_foreign_assets_from_asset_hub_to_para() { + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let sender = AssetHubWestendSender::get(); + let native_amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let native_asset_location = RelayLocation::get(); + let receiver = PenpalAReceiver::get(); + let assets_owner = PenpalAssetOwner::get(); + // Foreign asset used: bridged ROC + let foreign_amount_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; + let roc_at_westend_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + + // Configure destination chain to trust AH as reserve of ROC + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Rococo)]).encode(), + )], + )); + }); + PenpalA::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubWestend::force_create_foreign_asset( + roc_at_westend_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubWestend::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + roc_at_westend_parachains.clone().try_into().unwrap(), + sender.clone(), + foreign_amount_to_send * 2, + ); + + // Assets to send + let assets: Vec = vec![ + (Parent, native_amount_to_send).into(), + (roc_at_westend_parachains.clone(), foreign_amount_to_send).into(), + ]; + let fee_asset_id = AssetId(Parent.into()); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + native_amount_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = SystemParaToParaTest::new(test_args); + + // Query initial balances + let sender_balance_before = test.sender.balance; + let sender_rocs_before = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sender, + ) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location.clone(), &receiver) + }); + let receiver_rocs_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &receiver) + }); 
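+    // Fees are paid in the relay-chain native asset; both assets are sent with the
+    // `TransferType::LocalReserve` configured in `ah_to_para_transfer_assets` above.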
+ + // Set assertions and dispatchables + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); + test.set_dispatchable::(ah_to_para_transfer_assets); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sender_rocs_after = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sender, + ) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location, &receiver) + }); + let receiver_rocs_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - native_amount_to_send); + // Sender's balance is reduced by foreign amount sent + assert_eq!(sender_rocs_after, sender_rocs_before - foreign_amount_to_send); + // Receiver's assets is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + native_amount_to_send); + // Receiver's balance is increased by foreign amount sent + assert_eq!(receiver_rocs_after, receiver_rocs_before + foreign_amount_to_send); +} + +/// Reserve Transfers of native asset from Parachain to System Parachain should work +// =========================================================================== +// ======= Transfer - Native + Bridged Assets - Parachain->AssetHub ========== +// =========================================================================== +/// Transfers of native asset plus bridged asset from some Parachain to AssetHub +/// while paying fees using native asset. 
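+/// The bridged ROC is registered as a foreign asset on both PenpalA and Asset Hub, with Asset Hub
+/// acting as its reserve for the parachain, so `para_to_ah_transfer_assets` moves both the assets
+/// and the fees with `TransferType::DestinationReserve`.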
+#[test] +fn transfer_foreign_assets_from_para_to_asset_hub() { + // Init values for Parachain + let destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + let sender = PenpalASender::get(); + let native_amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 10000; + let native_asset_location = RelayLocation::get(); + let assets_owner = PenpalAssetOwner::get(); + + // Foreign asset used: bridged ROC + let foreign_amount_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; + let roc_at_westend_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + + // Configure destination chain to trust AH as reserve of ROC + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Rococo)]).encode(), + )], + )); + }); + PenpalA::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + AssetHubWestend::force_create_foreign_asset( + roc_at_westend_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + + // fund Parachain's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + native_asset_location.clone(), + sender.clone(), + native_amount_to_send * 2, + ); + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + roc_at_westend_parachains.clone(), + sender.clone(), + foreign_amount_to_send * 2, + ); + + // Init values for System Parachain + let receiver = AssetHubWestendReceiver::get(); + let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = + AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahr); + + // fund Parachain's SA on AssetHub with the assets held in reserve + AssetHubWestend::fund_accounts(vec![( + sov_penpal_on_ahr.clone().into(), + native_amount_to_send * 2, + )]); + AssetHubWestend::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + roc_at_westend_parachains.clone().try_into().unwrap(), + sov_penpal_on_ahr, + foreign_amount_to_send * 2, + ); + + // Assets to send + let assets: Vec = vec![ + (Parent, native_amount_to_send).into(), + (roc_at_westend_parachains.clone(), foreign_amount_to_send).into(), + ]; + let fee_asset_id = AssetId(Parent.into()); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + native_amount_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = ParaToSystemParaTest::new(test_args); + + // Query initial balances + let sender_native_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location.clone(), &sender) + }); + let sender_rocs_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &sender) + }); + let receiver_native_before = test.receiver.balance; + let receiver_rocs_before = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &receiver, + ) + }); + + // Set assertions and dispatchables + 
test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_ah_transfer_assets); + test.assert(); + + // Query final balances + let sender_native_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(native_asset_location, &sender) + }); + let sender_rocs_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &sender) + }); + let receiver_native_after = test.receiver.balance; + let receiver_rocs_after = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.try_into().unwrap(), + &receiver, + ) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_native_after < sender_native_before - native_amount_to_send); + // Sender's balance is reduced by foreign amount sent + assert_eq!(sender_rocs_after, sender_rocs_before - foreign_amount_to_send); + // Receiver's balance is increased + assert!(receiver_native_after > receiver_native_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_native_after < receiver_native_before + native_amount_to_send); + // Receiver's balance is increased by foreign amount sent + assert_eq!(receiver_rocs_after, receiver_rocs_before + foreign_amount_to_send); +} + +// ============================================================================== +// ===== Transfer - Native + Bridged Assets - Parachain->AssetHub->Parachain ==== +// ============================================================================== +/// Transfers of native asset plus bridged asset from Parachain to Parachain +/// (through AssetHub reserve) with fees paid using native asset. 
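+/// Both the assets and the fees use `TransferType::RemoteReserve(AssetHub)`, so the transfer is
+/// routed PenpalA -> Asset Hub -> PenpalB and `para_to_para_assethub_hop_assertions` checks the
+/// sovereign-account movements on Asset Hub.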
+#[test] +fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { + // Init values for Parachain Origin + let destination = PenpalA::sibling_location_of(PenpalB::para_id()); + let sender = PenpalASender::get(); + let wnd_to_send: Balance = WESTEND_ED * 10000; + let assets_owner = PenpalAssetOwner::get(); + let wnd_location = RelayLocation::get(); + let sender_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let sov_of_sender_on_ah = AssetHubWestend::sovereign_account_id_of(sender_as_seen_by_ah); + let receiver_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_of_receiver_on_ah = AssetHubWestend::sovereign_account_id_of(receiver_as_seen_by_ah); + let roc_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; + + // Configure destination chain to trust AH as reserve of ROC + PenpalB::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Rococo)]).encode(), + )], + )); + }); + + // Register ROC as foreign asset and transfer it around the Westend ecosystem + let roc_at_westend_parachains = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + AssetHubWestend::force_create_foreign_asset( + roc_at_westend_parachains.clone().try_into().unwrap(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + PenpalA::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + PenpalB::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + assets_owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + + // fund Parachain's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + wnd_location.clone(), + sender.clone(), + wnd_to_send * 2, + ); + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner.clone()), + roc_at_westend_parachains.clone(), + sender.clone(), + roc_to_send * 2, + ); + // fund the Parachain Origin's SA on Asset Hub with the assets held in reserve + AssetHubWestend::fund_accounts(vec![(sov_of_sender_on_ah.clone().into(), wnd_to_send * 2)]); + AssetHubWestend::mint_foreign_asset( + ::RuntimeOrigin::signed(assets_owner), + roc_at_westend_parachains.clone().try_into().unwrap(), + sov_of_sender_on_ah.clone(), + roc_to_send * 2, + ); + + // Init values for Parachain Destination + let receiver = PenpalBReceiver::get(); + + // Assets to send + let assets: Vec = vec![ + (wnd_location.clone(), wnd_to_send).into(), + (roc_at_westend_parachains.clone(), roc_to_send).into(), + ]; + let fee_asset_id: AssetId = wnd_location.clone().into(); + let fee_asset_item = assets.iter().position(|a| a.id == fee_asset_id).unwrap() as u32; + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination, + receiver.clone(), + wnd_to_send, + assets.into(), + None, + fee_asset_item, + ), + }; + let mut test = ParaToParaThroughAHTest::new(test_args); + + // Query initial balances + let sender_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_location.clone(), &sender) + }); + let sender_rocs_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &sender) + }); + let wnds_in_sender_reserve_on_ah_before = + 
::account_data_of(sov_of_sender_on_ah.clone()).free; + let rocs_in_sender_reserve_on_ah_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sov_of_sender_on_ah, + ) + }); + let wnds_in_receiver_reserve_on_ah_before = + ::account_data_of(sov_of_receiver_on_ah.clone()).free; + let rocs_in_receiver_reserve_on_ah_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sov_of_receiver_on_ah, + ) + }); + let receiver_wnds_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_location.clone(), &receiver) + }); + let receiver_rocs_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &receiver) + }); + + // Set assertions and dispatchables + test.set_assertion::(para_to_para_through_hop_sender_assertions); + test.set_assertion::(para_to_para_assethub_hop_assertions); + test.set_assertion::(para_to_para_through_hop_receiver_assertions); + test.set_dispatchable::(para_to_para_transfer_assets_through_ah); + test.assert(); + + // Query final balances + let sender_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_location.clone(), &sender) + }); + let sender_rocs_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone(), &sender) + }); + let rocs_in_sender_reserve_on_ah_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sov_of_sender_on_ah, + ) + }); + let wnds_in_sender_reserve_on_ah_after = + ::account_data_of(sov_of_sender_on_ah).free; + let rocs_in_receiver_reserve_on_ah_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_westend_parachains.clone().try_into().unwrap(), + &sov_of_receiver_on_ah, + ) + }); + let wnds_in_receiver_reserve_on_ah_after = + ::account_data_of(sov_of_receiver_on_ah).free; + let receiver_wnds_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_location, &receiver) + }); + let receiver_rocs_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_wnds_after < sender_wnds_before - wnd_to_send); + assert_eq!(sender_rocs_after, sender_rocs_before - roc_to_send); + // Sovereign accounts on reserve are changed accordingly + assert_eq!( + wnds_in_sender_reserve_on_ah_after, + wnds_in_sender_reserve_on_ah_before - wnd_to_send + ); + assert_eq!( + rocs_in_sender_reserve_on_ah_after, + rocs_in_sender_reserve_on_ah_before - roc_to_send + ); + assert!(wnds_in_receiver_reserve_on_ah_after > wnds_in_receiver_reserve_on_ah_before); + assert_eq!( + rocs_in_receiver_reserve_on_ah_after, + rocs_in_receiver_reserve_on_ah_before + roc_to_send + ); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + assert_eq!(receiver_rocs_after, receiver_rocs_before + roc_to_send); +} + +// ============================================================================================== +// ==== Bidirectional Transfer - Native + Teleportable Foreign Assets - Parachain<->AssetHub ==== +// 
============================================================================================== +/// Transfers of native asset plus teleportable foreign asset from Parachain to AssetHub and back +/// with fees paid using native asset. +#[test] +fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explicit_transfer_types() { + do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_asset_hub_teleport_foreign_assets, + asset_hub_to_para_teleport_foreign_assets, + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs index 3cd7c9c46d69..e463e21e9e52 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs @@ -14,6 +14,7 @@ // limitations under the License. mod fellowship_treasury; +mod foreign_assets_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index a26dfef8e8e7..df01eb0d48ad 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -47,7 +47,7 @@ fn para_to_relay_sender_assertions(t: ParaToRelayTest) { RuntimeEvent::ForeignAssets( pallet_assets::Event::Burned { asset_id, owner, balance, .. 
} ) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, @@ -55,70 +55,92 @@ fn para_to_relay_sender_assertions(t: ParaToRelayTest) { ); } -fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { +pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 864_610_000, - 8_799, - ))); - + AssetHubWestend::assert_xcm_pallet_attempted_complete(None); + + let sov_acc_of_dest = AssetHubWestend::sovereign_account_id_of(t.args.dest.clone()); + for (idx, asset) in t.args.assets.into_inner().into_iter().enumerate() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + if idx == t.args.fee_asset_item as usize { + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount of native asset is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == sov_acc_of_dest, + amount: *amount == asset_amount, + }, + ] + ); + } else { + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount of foreign asset is transferred to Parachain's Sovereign account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Transferred { asset_id, from, to, amount }, + ) => { + asset_id: *asset_id == expected_id, + from: *from == t.sender.account_id, + to: *to == sov_acc_of_dest, + amount: *amount == asset_amount, + }, + ] + ); + } + } assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from, to, amount } - ) => { - from: *from == t.sender.account_id, - to: *to == AssetHubWestend::sovereign_account_id_of( - t.args.dest.clone() - ), - amount: *amount == t.args.amount, - }, // Transport fees are paid - RuntimeEvent::PolkadotXcm( - pallet_xcm::Event::FeesPaid { .. } - ) => {}, + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); AssetHubWestend::assert_xcm_pallet_sent(); } -fn system_para_to_para_receiver_assertions(t: SystemParaToParaTest) { +pub fn system_para_to_para_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); PenpalA::assert_xcmp_queue_success(None); - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == system_para_native_asset_location, - owner: *owner == t.receiver.account_id, - }, - ] - ); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0.try_into().unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } } -fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { +pub fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); - assert_expected_events!( - PenpalA, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::ForeignAssets( - pallet_assets::Event::Burned { asset_id, owner, balance, .. } - ) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), - owner: *owner == t.sender.account_id, - balance: *balance == t.args.amount, - }, - ] - ); + PenpalA::assert_xcm_pallet_attempted_complete(None); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0; + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance } + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.sender.account_id, + balance: *balance == asset_amount, + }, + ] + ); + } } fn para_to_relay_receiver_assertions(t: ParaToRelayTest) { @@ -150,25 +172,57 @@ fn para_to_relay_receiver_assertions(t: ParaToRelayTest) { ); } -fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { +pub fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalA::para_id()), - ); - AssetHubWestend::assert_xcmp_queue_success(None); + let sov_acc_of_penpal = AssetHubWestend::sovereign_account_id_of(t.args.dest.clone()); + for (idx, asset) in t.args.assets.into_inner().into_iter().enumerate() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let asset_amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + if idx == t.args.fee_asset_item as usize { + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount of native is withdrawn from Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_acc_of_penpal.clone().into(), + amount: *amount == asset_amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { + who: *who == t.receiver.account_id, + }, + ] + ); + } else { + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount of foreign asset is transferred from Parachain's Sovereign account + // to Receiver's account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == sov_acc_of_penpal, + balance: *balance == asset_amount, + }, + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Issued { asset_id, owner, amount }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + amount: *amount == asset_amount, + }, + ] + ); + } + } assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is withdrawn from Parachain's Sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahr.clone().into(), - amount: *amount == t.args.amount, - }, - RuntimeEvent::Balances(pallet_balances::Event::Minted { .. }) => {}, RuntimeEvent::MessageQueue( pallet_message_queue::Event::Processed { success: true, .. } ) => {}, @@ -212,10 +266,8 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let reservable_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let reservable_asset_location = PenpalLocalReservableFromAssetHub::get(); PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8799))); assert_expected_events!( PenpalA, @@ -245,14 +297,13 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { fn system_para_to_para_assets_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_asset_location = PenpalLocalReservableFromAssetHub::get(); PenpalA::assert_xcmp_queue_success(None); assert_expected_events!( PenpalA, vec![ RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.receiver.account_id, }, RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { @@ -304,7 +355,7 @@ fn relay_to_para_assets_receiver_assertions(t: RelayToParaTest) { PenpalA, vec![ RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == v3::Location::try_from(RelayLocation::get()).expect("conversion works"), + asset_id: *asset_id == RelayLocation::get(), owner: *owner == t.receiver.account_id, }, RuntimeEvent::MessageQueue( @@ -314,29 +365,27 @@ fn relay_to_para_assets_receiver_assertions(t: RelayToParaTest) { ); } -fn para_to_para_through_relay_sender_assertions(t: ParaToParaThroughRelayTest) { +pub fn para_to_para_through_hop_sender_assertions(t: Test) { type RuntimeEvent = ::RuntimeEvent; - - let relay_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - PenpalA::assert_xcm_pallet_attempted_complete(None); - // XCM sent to relay reserve - PenpalA::assert_parachain_system_ump_sent(); - assert_expected_events!( - PenpalA, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereign account - RuntimeEvent::ForeignAssets( - pallet_assets::Event::Burned { asset_id, owner, balance }, - ) => { - asset_id: *asset_id == relay_asset_location, - owner: *owner == t.sender.account_id, - balance: *balance == t.args.amount, - }, - ] - ); + for asset in t.args.assets.into_inner() { + let expected_id = asset.id.0.clone().try_into().unwrap(); + let amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap(); + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Burned { asset_id, owner, balance }, + ) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.sender.account_id, + balance: *balance == amount, + }, + ] + ); + } } fn para_to_para_relay_hop_assertions(t: ParaToParaThroughRelayTest) { @@ -369,22 +418,22 @@ fn para_to_para_relay_hop_assertions(t: ParaToParaThroughRelayTest) { ); } -fn para_to_para_through_relay_receiver_assertions(t: ParaToParaThroughRelayTest) { +pub fn para_to_para_through_hop_receiver_assertions(t: Test) { type RuntimeEvent = ::RuntimeEvent; - let relay_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); PenpalB::assert_xcmp_queue_success(None); - - assert_expected_events!( - PenpalB, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { - asset_id: *asset_id == relay_asset_location, - owner: *owner == t.receiver.account_id, - }, - ] - ); + for asset in t.args.assets.into_inner().into_iter() { + let expected_id = asset.id.0.try_into().unwrap(); + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } } fn relay_to_para_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { @@ -526,8 +575,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let amount_to_send: Balance = WESTEND_ED * 1000; // Init values fot Parachain - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); // Init Test @@ -542,7 +590,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender_balance_before = test.sender.balance; let receiver_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &receiver) + >::balance(relay_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables @@ -555,7 +603,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &receiver) + >::balance(relay_native_asset_location, &receiver) }); // Sender's balance is reduced by amount sent plus delivery fees @@ -577,13 +625,12 @@ fn reserve_transfer_native_asset_from_para_to_relay() { let amount_to_send: Balance = WESTEND_ED * 1000; let assets: Assets = (Parent, amount_to_send).into(); let asset_owner = PenpalAssetOwner::get(); - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - relay_native_asset_location, + relay_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -614,7 +661,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; @@ -627,7 +674,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.into(), &sender) + >::balance(relay_native_asset_location, &sender) }); let receiver_balance_after = test.receiver.balance; @@ -654,8 +701,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let assets: Assets = (Parent, amount_to_send).into(); // Init values for Parachain - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); // Init Test @@ -677,7 +723,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let sender_balance_before = test.sender.balance; let receiver_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.into(), &receiver) + >::balance(system_para_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables @@ -711,14 +757,13 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { let 
sender = PenpalASender::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets: Assets = (Parent, amount_to_send).into(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let asset_owner = PenpalAssetOwner::get(); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - system_para_native_asset_location, + system_para_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -750,7 +795,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_para_native_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; @@ -818,10 +863,8 @@ fn reserve_transfer_assets_from_system_para_to_para() { // Init values for Parachain let receiver = PenpalAReceiver::get(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let system_para_foreign_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); // Init Test let para_test_args = TestContext { @@ -846,11 +889,14 @@ fn reserve_transfer_assets_from_system_para_to_para() { }); let receiver_system_native_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &receiver) + >::balance(system_para_native_asset_location.clone(), &receiver) }); let receiver_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &receiver) + >::balance( + system_para_foreign_asset_location.clone(), + &receiver, + ) }); // Set assertions and dispatchables @@ -905,14 +951,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { let asset_amount_to_send = ASSET_HUB_WESTEND_ED * 100; let penpal_asset_owner = PenpalAssetOwner::get(); let penpal_asset_owner_signer = ::RuntimeOrigin::signed(penpal_asset_owner); - let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); - let asset_location_on_penpal_latest: Location = asset_location_on_penpal.try_into().unwrap(); - let system_asset_location_on_penpal = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let asset_location_on_penpal = PenpalLocalReservableFromAssetHub::get(); + let system_asset_location_on_penpal = RelayLocation::get(); let assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (asset_location_on_penpal_latest, asset_amount_to_send).into(), + (asset_location_on_penpal.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = assets @@ -940,10 +983,8 @@ fn reserve_transfer_assets_from_para_to_system_para() { let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let 
system_para_foreign_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); + let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); let ah_asset_owner = AssetHubWestendAssetOwner::get(); let ah_asset_owner_signer = ::RuntimeOrigin::signed(ah_asset_owner); @@ -978,11 +1019,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query initial balances let sender_system_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_para_native_asset_location.clone(), &sender) }); let sender_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &sender) + >::balance(system_para_foreign_asset_location.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; let receiver_assets_before = AssetHubWestend::execute_with(|| { @@ -1031,22 +1072,21 @@ fn reserve_transfer_assets_from_para_to_system_para() { /// Reserve Transfers of native asset from Parachain to Parachain (through Relay reserve) should /// work #[test] -fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { +fn reserve_transfer_native_asset_from_para_to_para_through_relay() { // Init values for Parachain Origin let destination = PenpalA::sibling_location_of(PenpalB::para_id()); let sender = PenpalASender::get(); let amount_to_send: Balance = WESTEND_ED * 10000; let asset_owner = PenpalAssetOwner::get(); let assets = (Parent, amount_to_send).into(); - let relay_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let relay_native_asset_location = RelayLocation::get(); let sender_as_seen_by_relay = Westend::child_location_of(PenpalA::para_id()); let sov_of_sender_on_relay = Westend::sovereign_account_id_of(sender_as_seen_by_relay); // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner), - relay_native_asset_location, + relay_native_asset_location.clone(), sender.clone(), amount_to_send * 2, ); @@ -1068,24 +1108,24 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { // Query initial balances let sender_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_assets_before = PenpalB::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &receiver) + >::balance(relay_native_asset_location.clone(), &receiver) }); // Set assertions and dispatchables - test.set_assertion::(para_to_para_through_relay_sender_assertions); + test.set_assertion::(para_to_para_through_hop_sender_assertions); test.set_assertion::(para_to_para_relay_hop_assertions); - test.set_assertion::(para_to_para_through_relay_receiver_assertions); + test.set_assertion::(para_to_para_through_hop_receiver_assertions); test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &sender) + >::balance(relay_native_asset_location.clone(), &sender) }); let receiver_assets_after = PenpalB::execute_with(|| { type ForeignAssets = 
::ForeignAssets; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index aa673c03483a..31f763be6370 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -17,7 +17,10 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new(asset_hub_westend_runtime::xcm_config::WestendLocationV3::get()); + let asset_native = Box::new( + v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) + .expect("conversion works"), + ); let asset_one = Box::new(v3::Location { parents: 0, interior: [ @@ -111,8 +114,7 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - let asset_native = - Box::new(v3::Location::try_from(RelayLocation::get()).expect("conversion works")); + let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); let asset_location_on_penpal = v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion_works"); let foreign_asset_at_asset_hub_westend = @@ -227,11 +229,9 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = Box::new(asset_hub_westend_runtime::xcm_config::WestendLocationV3::get()); - let mut asset_one = asset_hub_westend_runtime::xcm_config::PoolAssetsPalletLocationV3::get(); - asset_one - .append_with(v3::Junction::GeneralIndex(ASSET_ID.into())) - .expect("pool assets"); + let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); + let mut asset_one = asset_hub_westend_runtime::xcm_config::PoolAssetsPalletLocation::get(); + asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubWestend::execute_with(|| { let pool_owner_account_id = asset_hub_westend_runtime::AssetConversionOrigin::get(); @@ -254,8 +254,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - asset_native, - Box::new(asset_one), + Box::new(v3::Location::try_from(asset_native).expect("conversion works")), + Box::new(v3::Location::try_from(asset_one).expect("conversion works")), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -264,7 +264,9 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocationV3::get(); + let asset_native = + v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) + .expect("conversion works"); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index ac518d2ed4a4..a524b87b2daf 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -110,8 +110,7 @@ fn 
para_dest_assertions(t: RelayToSystemParaTest) { fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let expected_asset_id = t.args.asset_id.unwrap(); let (_, expected_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); @@ -204,8 +203,7 @@ fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { let (_, expected_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); let checking_account = ::PolkadotXcm::check_account(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); PenpalA::assert_xcmp_queue_success(None); @@ -414,29 +412,28 @@ fn teleport_to_other_system_parachains_works() { ); } -/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work -/// (using native reserve-based transfer for fees) -#[test] -fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets while paying +/// fees using (reserve transferred) native asset. +pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_ah_dispatchable: fn(ParaToSystemParaTest) -> DispatchResult, + ah_to_para_dispatchable: fn(SystemParaToParaTest) -> DispatchResult, +) { // Init values for Parachain let fee_amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 100; - let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion works"); + let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); let asset_id_on_penpal = match asset_location_on_penpal.last() { - Some(v3::Junction::GeneralIndex(id)) => *id as u32, + Some(Junction::GeneralIndex(id)) => *id as u32, _ => unreachable!(), }; let asset_amount_to_send = ASSET_HUB_WESTEND_ED * 100; let asset_owner = PenpalAssetOwner::get(); - let system_para_native_asset_location = - v3::Location::try_from(RelayLocation::get()).expect("conversion works"); + let system_para_native_asset_location = RelayLocation::get(); let sender = PenpalASender::get(); let penpal_check_account = ::PolkadotXcm::check_account(); let ah_as_seen_by_penpal = PenpalA::sibling_location_of(AssetHubWestend::para_id()); - let asset_location_on_penpal_latest: Location = asset_location_on_penpal.try_into().unwrap(); let penpal_assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (asset_location_on_penpal_latest, asset_amount_to_send).into(), + (asset_location_on_penpal.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = penpal_assets @@ -448,7 +445,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // fund Parachain's sender account PenpalA::mint_foreign_asset( ::RuntimeOrigin::signed(asset_owner.clone()), - system_para_native_asset_location, + system_para_native_asset_location.clone(), sender.clone(), fee_amount_to_send * 2, ); @@ -475,7 +472,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // Init values for System Parachain let foreign_asset_at_asset_hub_westend = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, 
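// The asset's id as seen from Asset Hub: Penpal's local asset location re-anchored under
// Penpal's `Parachain` junction.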
[Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); let penpal_to_ah_beneficiary_id = AssetHubWestendReceiver::get(); @@ -497,7 +494,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let penpal_sender_balance_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalASender::get(), ) }); @@ -511,20 +508,20 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_receiver_assets_before = AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), &AssetHubWestendReceiver::get(), ) }); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_sender_assertions); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_receiver_assertions); - penpal_to_ah.set_dispatchable::(para_to_system_para_transfer_assets); + penpal_to_ah.set_dispatchable::(para_to_ah_dispatchable); penpal_to_ah.assert(); let penpal_sender_balance_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalASender::get(), ) }); @@ -538,7 +535,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_receiver_assets_after = AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), &AssetHubWestendReceiver::get(), ) }); @@ -566,19 +563,17 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { type ForeignAssets = ::ForeignAssets; assert_ok!(ForeignAssets::transfer( ::RuntimeOrigin::signed(AssetHubWestendReceiver::get()), - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), AssetHubWestendSender::get().into(), asset_amount_to_send, )); }); - let foreign_asset_at_asset_hub_westend_latest: Location = - foreign_asset_at_asset_hub_westend.try_into().unwrap(); let ah_to_penpal_beneficiary_id = PenpalAReceiver::get(); let penpal_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let ah_assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (foreign_asset_at_asset_hub_westend_latest, asset_amount_to_send).into(), + (foreign_asset_at_asset_hub_westend.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = ah_assets @@ -606,7 +601,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let penpal_receiver_balance_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - system_para_native_asset_location, + system_para_native_asset_location.clone(), &PenpalAReceiver::get(), ) }); @@ -614,7 +609,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_sender_assets_before = AssetHubWestend::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), &AssetHubWestendSender::get(), ) }); @@ -625,7 +620,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_sender_assertions); ah_to_penpal.set_assertion::(ah_to_penpal_foreign_assets_receiver_assertions); - 
ah_to_penpal.set_dispatchable::(system_para_to_para_transfer_assets); + ah_to_penpal.set_dispatchable::(ah_to_para_dispatchable); ah_to_penpal.assert(); let ah_sender_balance_after = ah_to_penpal.sender.balance; @@ -640,7 +635,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { let ah_sender_assets_after = AssetHubWestend::execute_with(|| { type ForeignAssets = ::ForeignAssets; >::balance( - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), &AssetHubWestendSender::get(), ) }); @@ -663,3 +658,13 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { // Receiver's balance is increased by exact amount assert_eq!(penpal_receiver_assets_after, penpal_receiver_assets_before + asset_amount_to_send); } + +/// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets should work +/// (using native reserve-based transfer for fees) +#[test] +fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { + do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + para_to_system_para_transfer_assets, + system_para_to_para_transfer_assets, + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index b5e19cf3fa3a..0415af580ef8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -25,6 +25,7 @@ mod imports { prelude::{AccountId32 as AccountId32Junction, *}, v3::{self, NetworkId::Westend as WestendId}, }; + pub use xcm_executor::traits::TransferType; // Cumulus pub use emulated_integration_tests_common::{ @@ -46,7 +47,7 @@ mod imports { bridge_hub_rococo_emulated_chain::{ genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, }, - penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + penpal_emulated_chain::{PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner}, rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 787a82ed32f7..69d625be2804 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -31,6 +31,73 @@ fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: Location, amount: u assert_bridge_hub_westend_message_received(); } +fn send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( + id: Location, + transfer_amount: u128, +) { + let destination = asset_hub_westend_location(); + let local_asset_hub: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + let sov_ahw_on_ahr = 
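// Penpal's sovereign account on AHR is debited for the transferred amount, while AHW's
// sovereign account on AHR is credited and holds it in reserve for the bridged side
// (both are checked in the event assertions below).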
AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + + // fund the AHR's SA on BHR for paying bridge transport fees + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); + + // set XCM versions + PenpalA::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); + BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); + + // send message over bridge + assert_ok!(PenpalA::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get()); + let beneficiary: Location = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); + let assets: Assets = (id.clone(), transfer_amount).into(); + let fees_id: AssetId = id.into(); + + ::PolkadotXcm::transfer_assets_using_type( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.clone().into()), + bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), + bx!(fees_id.into()), + bx!(TransferType::RemoteReserve(local_asset_hub.into())), + WeightLimit::Unlimited, + ) + })); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == transfer_amount, + }, + // Amount deposited in AHW's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahw_on_ahr.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }); + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); +} + #[test] fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { let roc_at_asset_hub_rococo: v3::Location = v3::Parent.into(); @@ -45,7 +112,7 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { vec![], ); let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Westend, + Westend, AssetHubWestend::para_id(), ); @@ -101,9 +168,11 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) }); - let roc_at_asset_hub_rococo_latest: Location = roc_at_asset_hub_rococo.try_into().unwrap(); let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; - send_asset_from_asset_hub_rococo_to_asset_hub_westend(roc_at_asset_hub_rococo_latest, amount); + send_asset_from_asset_hub_rococo_to_asset_hub_westend( + roc_at_asset_hub_rococo.try_into().unwrap(), + amount, + ); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -135,7 +204,7 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { assert!(sender_rocs_before > sender_rocs_after); // Receiver's balance is increased assert!(receiver_rocs_after > receiver_rocs_before); - // Reserve balance is reduced by sent amount + // Reserve balance is increased by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); } @@ -144,7 +213,7 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let prefund_amount = 10_000_000_000_000u128; let wnd_at_asset_hub_rococo = v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Westend)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); AssetHubRococo::force_create_foreign_asset( wnd_at_asset_hub_rococo, owner, @@ -155,7 +224,7 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Rococo, + Rococo, AssetHubRococo::para_id(), ); AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]); @@ -171,10 +240,9 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let receiver_wnds_before = ::account_data_of(AssetHubWestendReceiver::get()).free; - let wnd_at_asset_hub_rococo_latest: Location = wnd_at_asset_hub_rococo.try_into().unwrap(); let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; send_asset_from_asset_hub_rococo_to_asset_hub_westend( - wnd_at_asset_hub_rococo_latest.clone(), + Location::try_from(wnd_at_asset_hub_rococo).unwrap(), amount_to_send, ); AssetHubWestend::execute_with(|| { @@ -217,3 +285,95 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { // Reserve balance is reduced by sent amount assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); } + +#[test] +fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { + let roc_at_rococo_parachains: Location = Parent.into(); + let roc_at_asset_hub_westend = Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend.clone().try_into().unwrap(), + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let 
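// Sovereign account of Asset Hub Westend on Asset Hub Rococo: ROCs sent over the bridge are
// kept here as the reserve, which the final balance assertions check.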
sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + + let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount * 2)]); + // fund Penpal's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + roc_at_rococo_parachains.clone(), + PenpalASender::get(), + amount * 2, + ); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + let sender_rocs_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + roc_at_rococo_parachains.clone(), + &PenpalASender::get(), + ) + }); + let receiver_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_asset_hub_westend.clone().try_into().unwrap(), + &AssetHubWestendReceiver::get(), + ) + }); + send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( + roc_at_rococo_parachains.clone(), + amount, + ); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == roc_at_rococo_parachains.clone().try_into().unwrap(), + owner: *owner == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_rocs_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_rococo_parachains, &PenpalASender::get()) + }); + let receiver_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + roc_at_asset_hub_westend.try_into().unwrap(), + &AssetHubWestendReceiver::get(), + ) + }); + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_after < sender_rocs_before); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is increased by sent amount (less fess) + assert!(rocs_in_reserve_on_ahr_after > rocs_in_reserve_on_ahr_before); + assert!(rocs_in_reserve_on_ahr_after <= rocs_in_reserve_on_ahr_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 780ba57f78a1..e332eb5bfda7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -306,8 +306,6 @@ fn send_token_from_ethereum_to_penpal() { // The Weth asset location, identified by the contract address on Ethereum let weth_asset_location: Location = (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); - // Converts the Weth asset location into an asset ID - let weth_asset_id: v3::Location = weth_asset_location.try_into().unwrap(); let origin_location = (Parent, Parent, EthereumNetwork::get()).into(); @@ -321,12 +319,12 @@ fn send_token_from_ethereum_to_penpal() { PenpalA::execute_with(|| { assert_ok!(::ForeignAssets::create( ::RuntimeOrigin::signed(PenpalASender::get()), - weth_asset_id, + weth_asset_location.clone(), asset_hub_sovereign.into(), 1000, )); - assert!(::ForeignAssets::asset_exists(weth_asset_id)); + assert!(::ForeignAssets::asset_exists(weth_asset_location)); }); BridgeHubRococo::execute_with(|| { @@ -381,10 +379,8 @@ fn send_token_from_ethereum_to_penpal() { #[test] fn send_weth_asset_from_asset_hub_to_ethereum() { use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; - let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( - 1, - [Parachain(AssetHubRococo::para_id().into())], - )); + let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); + let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location); AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION)); BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION)); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 60c31ce5a4ae..36b846e10313 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -26,6 +26,7 @@ mod imports { v3, v4::NetworkId::Rococo as RococoId, }; + pub use xcm_executor::traits::TransferType; // Cumulus pub use emulated_integration_tests_common::{ @@ -48,13 +49,15 @@ mod imports { 
genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, }, + penpal_emulated_chain::{PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet}, westend_emulated_chain::WestendRelayPallet as WestendPallet, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, BridgeHubWestendPara as BridgeHubWestend, - BridgeHubWestendParaSender as BridgeHubWestendSender, WestendRelay as Westend, + BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB, + PenpalBParaSender as PenpalBSender, WestendRelay as Westend, }; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 5b0990973d21..3a8ce7d43f3e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -30,6 +30,73 @@ fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: Location, amount: u assert_bridge_hub_rococo_message_received(); } +fn send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( + id: Location, + transfer_amount: u128, +) { + let destination = asset_hub_rococo_location(); + let local_asset_hub: Location = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + Rococo, + AssetHubRococo::para_id(), + ); + + // fund the AHW's SA on BHW for paying bridge transport fees + BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); + + // set XCM versions + PenpalB::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); + BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); + + // send message over bridge + assert_ok!(PenpalB::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); + let beneficiary: Location = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); + let assets: Assets = (id.clone(), transfer_amount).into(); + let fees_id: AssetId = id.into(); + + ::PolkadotXcm::transfer_assets_using_type( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), + bx!(fees_id.into()), + bx!(TransferType::RemoteReserve(local_asset_hub.into())), + WeightLimit::Unlimited, + ) + })); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + amount: *amount == transfer_amount, + }, + // Amount 
deposited in AHR's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahr_on_ahw.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); +} + #[test] fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { let wnd_at_asset_hub_westend: Location = Parent.into(); @@ -44,7 +111,7 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { vec![], ); let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Rococo, + Rococo, AssetHubRococo::para_id(), ); @@ -153,7 +220,7 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Westend, + Westend, AssetHubWestend::para_id(), ); AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); @@ -169,10 +236,9 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { let receiver_rocs_before = ::account_data_of(AssetHubRococoReceiver::get()).free; - let roc_at_asset_hub_westend_latest: Location = roc_at_asset_hub_westend.try_into().unwrap(); let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; send_asset_from_asset_hub_westend_to_asset_hub_rococo( - roc_at_asset_hub_westend_latest.clone(), + roc_at_asset_hub_westend.try_into().unwrap(), amount_to_send, ); AssetHubRococo::execute_with(|| { @@ -215,3 +281,95 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { // Reserve balance is reduced by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); } + +#[test] +fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { + let wnd_at_westend_parachains: Location = Parent.into(); + let wnd_at_asset_hub_rococo = Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo.clone().try_into().unwrap(), + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + Rococo, + AssetHubRococo::para_id(), + ); + + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount * 2)]); + // fund Penpal's sender account + PenpalB::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + wnd_at_westend_parachains.clone(), + PenpalBSender::get(), + amount * 2, + ); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let sender_wnds_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_westend_parachains.clone(), + &PenpalBSender::get(), + ) + }); + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_asset_hub_rococo.clone().try_into().unwrap(), + &AssetHubRococoReceiver::get(), + ) + }); + 
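// Dispatch the transfer: the helper above routes it through the local Asset Hub by using
// `transfer_assets_using_type` with `TransferType::RemoteReserve(local_asset_hub)` for both
// the assets and the fees.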
send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( + wnd_at_westend_parachains.clone(), + amount, + ); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == wnd_at_westend_parachains.clone().try_into().unwrap(), + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_wnds_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_westend_parachains, &PenpalBSender::get()) + }); + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_asset_hub_rococo.try_into().unwrap(), + &AssetHubRococoReceiver::get(), + ) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + + // Sender's balance is reduced + assert!(sender_wnds_after < sender_wnds_before); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount (less fees) + assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before); + assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml new file mode 100644 index 000000000000..d1dbef9fc415 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "collectives-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Collectives Westend runtime integration tests with xcm-emulator" +publish = false + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } +pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } + +# Polkadot +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = 
"../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } +asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } +collectives-westend-runtime = { path = "../../../../../runtimes/collectives/collectives-westend" } +cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } +cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs new file mode 100644 index 000000000000..97239330216a --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use xcm::{prelude::*, v3}; + +pub use emulated_integration_tests_common::xcm_emulator::{ + assert_expected_events, bx, Chain, RelayChain as Relay, TestExt, +}; +pub use westend_system_emulated_network::{ + asset_hub_westend_emulated_chain::AssetHubWestendParaPallet as AssetHubWestendPallet, + collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubWestendPara as AssetHubWestend, CollectivesWestendPara as CollectivesWestend, + WestendRelay as Westend, +}; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs new file mode 100644 index 000000000000..bde1220e2495 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -0,0 +1,236 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId; +use emulated_integration_tests_common::accounts::ALICE; +use frame_support::{ + assert_ok, dispatch::RawOrigin, instances::Instance1, sp_runtime::traits::Dispatchable, + traits::fungible::Inspect, +}; +use polkadot_runtime_common::impls::VersionedLocatableAsset; +use westend_runtime::OriginCaller; +use westend_runtime_constants::currency::UNITS; +use xcm_executor::traits::ConvertLocation; + +// Fund Fellowship Treasury from Westend Treasury and spend from Fellowship Treasury. +#[test] +fn fellowship_treasury_spend() { + // initial treasury balance on Asset Hub in WNDs. + let treasury_balance = 20_000_000 * UNITS; + // target fellowship balance on Asset Hub in WNDs. + let fellowship_treasury_balance = 1_000_000 * UNITS; + // fellowship first spend balance in WNDs. + let fellowship_spend_balance = 10_000 * UNITS; + + let init_alice_balance = AssetHubWestend::execute_with(|| { + <::Balances as Inspect<_>>::balance( + &AssetHubWestend::account_id_of(ALICE), + ) + }); + + Westend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type Runtime = ::Runtime; + type Balances = ::Balances; + type Treasury = ::Treasury; + + // Fund Treasury account on Asset Hub with WNDs. + + let root = ::RuntimeOrigin::root(); + let treasury_account = Treasury::account_id(); + + // Mint assets to Treasury account on Relay Chain. + assert_ok!(Balances::force_set_balance( + root.clone(), + treasury_account.clone().into(), + treasury_balance * 2, + )); + + let native_asset = Location::here(); + let asset_hub_location: Location = [Parachain(1000)].into(); + let treasury_location: Location = (Parent, PalletInstance(37)).into(); + + let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { + as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { + dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::V4(treasury_location)), + assets: bx!(VersionedAssets::V4( + Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() + )), + fee_asset_item: 0, + })), + }); + + // Dispatched from Root, using `dispatch_as` to act as `Signed(treasury_account)`. + assert_ok!(teleport_call.dispatch(root)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + Westend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type Treasury = ::Treasury; + + // Fund Fellowship Treasury from Westend Treasury. 
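+ // `fellowship_treasury_location` below is the Fellowship Treasury pallet on the Collectives chain (Parachain 1001, PalletInstance 65), expressed relative to Asset Hub where the WNDs are held; the same location is converted to an Asset Hub account id further down to verify the deposit.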
+ + let treasury_origin: RuntimeOrigin = + westend_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + let fellowship_treasury_location: Location = + Location::new(1, [Parachain(1001), PalletInstance(65)]); + let asset_hub_location: Location = [Parachain(1000)].into(); + let native_asset = Location::parent(); + + let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { + asset_kind: bx!(VersionedLocatableAsset::V4 { + location: asset_hub_location.clone(), + asset_id: native_asset.into(), + }), + amount: fellowship_treasury_balance, + beneficiary: bx!(VersionedLocation::V4(fellowship_treasury_location)), + valid_from: None, + }); + + assert_ok!(treasury_spend_call.dispatch(treasury_origin)); + + // Claim the spend. + + let alice_signed = RuntimeOrigin::signed(Westend::account_id_of(ALICE)); + assert_ok!(Treasury::payout(alice_signed.clone(), 0)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {}, + RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Balances = ::Balances; + + // Ensure that the funds were deposited to the Fellowship Treasury account. + + let fellowship_treasury_location: Location = + Location::new(1, [Parachain(1001), PalletInstance(65)]); + let fellowship_treasury_account = + AssetHubLocationToAccountId::convert_location(&fellowship_treasury_location).unwrap(); + + assert_eq!( + >::balance(&fellowship_treasury_account), + fellowship_treasury_balance + ); + + // Assert events triggered by XCM pay program: + // 1. treasury asset transferred to spend beneficiary; + // 2. response to Relay Chain Treasury pallet instance sent back; + // 3. XCM program completed; + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {}, + RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + }); + + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type FellowshipTreasury = + ::FellowshipTreasury; + + // Fund Alice's account from the Fellowship Treasury. + + let fellows_origin: RuntimeOrigin = + collectives_westend_runtime::fellowship::pallet_fellowship_origins::Origin::Fellows + .into(); + let asset_hub_location: Location = (Parent, Parachain(1000)).into(); + let native_asset = Location::parent(); + + let alice_location: Location = [Junction::AccountId32 { + network: None, + id: CollectivesWestend::account_id_of(ALICE).into(), + }] + .into(); + + let fellowship_treasury_spend_call = + RuntimeCall::FellowshipTreasury(pallet_treasury::Call::::spend { + asset_kind: bx!(VersionedLocatableAsset::V4 { + location: asset_hub_location, + asset_id: native_asset.into(), + }), + amount: fellowship_spend_balance, + beneficiary: bx!(VersionedLocation::V4(alice_location)), + valid_from: None, + }); + + assert_ok!(fellowship_treasury_spend_call.dispatch(fellows_origin)); + + // Claim the spend. 
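+ // Spend index 0 refers to the spend approved just above; as with the relay-chain spend earlier in this test, any signed account may trigger the payout, and here the claimer (Alice) is also the beneficiary of this spend.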
+ + let alice_signed = RuntimeOrigin::signed(CollectivesWestend::account_id_of(ALICE)); + assert_ok!(FellowshipTreasury::payout(alice_signed.clone(), 0)); + + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {}, + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Balances = ::Balances; + + // Ensure that the funds were deposited to Alice's account. + + let alice_account = AssetHubWestend::account_id_of(ALICE); + assert_eq!( + >::balance(&alice_account), + fellowship_spend_balance + init_alice_balance + ); + + // Assert events triggered by XCM pay program: + // 1. treasury asset transferred to spend beneficiary; + // 2. response sent back to the Fellowship Treasury pallet instance on the Collectives chain; + // 3. XCM program completed; + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + }); +} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs similarity index 77% rename from substrate/utils/frame/try-runtime/cli/src/commands/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs index 37902e676e3d..a9f65df34b64 100644 --- a/substrate/utils/frame/try-runtime/cli/src/commands/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs @@ -1,5 +1,3 @@ -// This file is part of Substrate. - // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -15,9 +13,4 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-pub mod create_snapshot; -pub mod execute_block; -pub mod fast_forward; -pub mod follow_chain; -pub mod offchain_worker; -pub mod on_runtime_upgrade; +mod fellowship_treasury; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 0733156716c1..e8be734214f4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -25,6 +25,7 @@ frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/r frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } @@ -120,6 +121,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -153,6 +155,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-assets/try-runtime", @@ -201,6 +204,7 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-assets/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 0ee98e6c22bf..30e211a8f1d0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -316,7 +316,7 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, xcm::v3::Location, >, @@ -333,6 +333,11 @@ pub type NativeAndAssets = fungible::UnionOf< AccountId, >; +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (xcm::v3::Location, xcm::v3::Location), +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -340,8 +345,12 @@ impl pallet_asset_conversion::Config for Runtime { type AssetKind = xcm::v3::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); - type PoolLocator = - pallet_asset_conversion::WithFirstAsset; + type PoolLocator = pallet_asset_conversion::WithFirstAsset< + TokenLocationV3, + AccountId, + Self::AssetKind, + PoolIdToAccountId, + >; type PoolAssetId = u32; type PoolAssets 
= PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam @@ -362,6 +371,18 @@ impl pallet_asset_conversion::Config for Runtime { >; } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed< + ::PoolId, + >; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = weights::pallet_asset_conversion_ops::WeightInfo; +} + parameter_types! { // we just reuse the same deposits pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); @@ -934,6 +955,10 @@ construct_runtime!( #[cfg(feature = "state-trie-version-1")] StateTrieMigration: pallet_state_trie_migration = 70, + + // TODO: the pallet instance should be removed once all pools have migrated + // to the new account IDs. + AssetConversionMigration: pallet_asset_conversion_ops = 200, } ); @@ -961,6 +986,7 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. +#[allow(deprecated)] pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions, @@ -1054,6 +1080,7 @@ mod benches { [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] + [pallet_asset_conversion_ops, AssetConversionMigration] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index fa9e86102c61..f20790cde39c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_ops; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs new file mode 100644 index 000000000000..e85420d32d9c --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-15, STEPS: `10`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --steps=10 +// --repeat=2 +// --pallet=pallet-asset-conversion-ops +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_ops`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_ops::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1105` + // Estimated: `7404` + // Minimum execution time: 2_323_000_000 picoseconds. + Weight::from_parts(2_404_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 1c58abcb379e..fceb82b6b06b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -82,8 +82,6 @@ parameter_types! { PalletInstance(::index() as u8).into(); pub UniquesPalletLocation: Location = PalletInstance(::index() as u8).into(); - pub PoolAssetsPalletLocationV3: xcm::v3::Location = - xcm::v3::Junction::PalletInstance(::index() as u8).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); pub const GovernanceLocation: Location = Location::parent(); pub StakingPot: AccountId = CollatorSelection::account_id(); @@ -179,6 +177,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, + xcm::v3::Location, >; /// Means for transacting foreign assets from different global consensus. 
@@ -581,7 +580,11 @@ impl xcm_executor::Config for XcmConfig { WeightToFee, crate::NativeAndAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation< + TrustBackedAssetsPalletLocation, + Balance, + xcm::v3::Location, + >, ForeignAssetsConvertedConcreteId, ), ResolveAssetTo, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 5fa7455ad2a0..f670c5f424ef 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -22,8 +22,7 @@ use asset_hub_rococo_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TokenLocation, TokenLocationV3, - TrustBackedAssetsPalletLocation, TrustBackedAssetsPalletLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, @@ -53,17 +52,14 @@ use sp_std::ops::Mul; use std::convert::Into; use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; -use xcm_builder::V4V3LocationConverter; +use xcm_builder::WithLatestLocationConverter; use xcm_executor::traits::{JustTry, WeightTrader}; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type AssetIdForTrustBackedAssetsConvertLatest = - assets_common::AssetIdForTrustBackedAssetsConvertLatest; + assets_common::AssetIdForTrustBackedAssetsConvert; type RuntimeHelper = asset_test_utils::RuntimeHelper; @@ -204,7 +200,7 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); let asset_1: u32 = 1; - let native_location = TokenLocationV3::get(); + let native_location = TokenLocation::get(); let asset_1_location = AssetIdForTrustBackedAssetsConvert::convert_back(&asset_1).unwrap(); // bob's initial balance for native and `asset1` assets. 
@@ -221,14 +217,24 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(asset_1_location) + Box::new( + xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") + ), + Box::new( + xcm::v3::Location::try_from(asset_1_location.clone()) + .expect("conversion works") + ) )); assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(asset_1_location), + Box::new( + xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") + ), + Box::new( + xcm::v3::Location::try_from(asset_1_location.clone()) + .expect("conversion works") + ), pool_liquidity, pool_liquidity, 1, @@ -240,8 +246,6 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let asset_total_issuance = Assets::total_issuance(asset_1); let native_total_issuance = Balances::total_issuance(); - let asset_1_location_latest: Location = asset_1_location.try_into().unwrap(); - // prepare input to buy weight. let weight = Weight::from_parts(4_000_000_000, 0); let fee = WeightToFee::weight_to_fee(&weight); @@ -249,7 +253,7 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { AssetConversion::get_amount_in(&fee, &pool_liquidity, &pool_liquidity).unwrap(); let extra_amount = 100; let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - let payment: Asset = (asset_1_location_latest.clone(), asset_fee + extra_amount).into(); + let payment: Asset = (asset_1_location.clone(), asset_fee + extra_amount).into(); // init trader and buy weight. let mut trader = ::Trader::new(); @@ -257,24 +261,25 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { trader.buy_weight(weight, payment.into(), &ctx).expect("Expected Ok"); // assert. - let unused_amount = unused_asset - .fungible - .get(&asset_1_location_latest.clone().into()) - .map_or(0, |a| *a); + let unused_amount = + unused_asset.fungible.get(&asset_1_location.clone().into()).map_or(0, |a| *a); assert_eq!(unused_amount, extra_amount); assert_eq!(Assets::total_issuance(asset_1), asset_total_issuance + asset_fee); // prepare input to refund weight. let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); - let (reserve1, reserve2) = - AssetConversion::get_reserves(native_location, asset_1_location).unwrap(); + let (reserve1, reserve2) = AssetConversion::get_reserves( + xcm::v3::Location::try_from(native_location).expect("conversion works"), + xcm::v3::Location::try_from(asset_1_location.clone()).expect("conversion works"), + ) + .unwrap(); let asset_refund = AssetConversion::get_amount_out(&refund, &reserve1, &reserve2).unwrap(); // refund. let actual_refund = trader.refund_weight(refund_weight, &ctx).unwrap(); - assert_eq!(actual_refund, (asset_1_location_latest, asset_refund).into()); + assert_eq!(actual_refund, (asset_1_location, asset_refund).into()); // assert. 
assert_eq!(Balances::balance(&staking_pot), initial_balance); @@ -303,7 +308,8 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { .execute_with(|| { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); - let native_location = TokenLocationV3::get(); + let native_location = + xcm::v3::Location::try_from(TokenLocation::get()).expect("conversion works"); let foreign_location = xcm::v3::Location { parents: 1, interior: ( @@ -435,7 +441,7 @@ fn test_asset_xcm_take_first_trader() { // get asset id as location let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&local_asset_id).unwrap(); + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -603,9 +609,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We are going to buy 4e9 weight let bought = Weight::from_parts(4_000_000_000u64, 0); - - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); // lets calculate amount needed let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -623,7 +627,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We actually use half of the weight let weight_used = bought / 2; - // Make sure refurnd works. + // Make sure refund works. let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); assert_eq!( @@ -677,8 +681,7 @@ fn test_asset_xcm_take_first_trader_refund_not_possible_since_amount_less_than_e // We are going to buy small amount let bought = Weight::from_parts(500_000_000u64, 0); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -730,8 +733,7 @@ fn test_that_buying_ed_refund_does_not_refund_for_take_first_trader() { // We are gonna buy ED let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -807,8 +809,7 @@ fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { // lets calculate amount needed let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let asset: Asset = (asset_location, asset_amount_needed).into(); @@ -925,13 +926,16 @@ fn test_assets_balances_api_works() { ))); // check trusted asset assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&local_asset_id).unwrap(), + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), minimum_asset_balance ) .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - V4V3LocationConverter::convert_back(&foreign_asset_id_location).unwrap(), + WithLatestLocationConverter::::convert_back( + &foreign_asset_id_location + ) + .unwrap(), 6 * foreign_asset_minimum_asset_balance ) .into()))); @@ -1004,7 +1008,7 @@ 
asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ XcmConfig, TrustBackedAssetsInstance, AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvertLatest, + AssetIdForTrustBackedAssetsConvert, collator_session_keys(), ExistentialDeposit::get(), 12345, @@ -1044,7 +1048,7 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p ForeignCreatorsSovereignAccountOf, ForeignAssetsInstance, xcm::v3::Location, - V4V3LocationConverter, + WithLatestLocationConverter, collator_session_keys(), ExistentialDeposit::get(), AssetDeposit::get(), diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index e25554ec0a5f..554659415a0d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -23,6 +23,7 @@ frame-system = { path = "../../../../../substrate/frame/system", default-feature frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } @@ -111,6 +112,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "hex-literal", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -142,6 +144,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-assets/try-runtime", @@ -189,6 +192,7 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-assets/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index c1fb6367312a..366fb91723ae 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -298,7 +298,7 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, xcm::v3::Location, >, @@ -315,6 +315,11 @@ pub type NativeAndAssets = fungible::UnionOf< AccountId, >; +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (xcm::v3::Location, xcm::v3::Location), +>; + impl pallet_asset_conversion::Config for Runtime { type 
RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -322,8 +327,12 @@ impl pallet_asset_conversion::Config for Runtime { type AssetKind = xcm::v3::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); - type PoolLocator = - pallet_asset_conversion::WithFirstAsset; + type PoolLocator = pallet_asset_conversion::WithFirstAsset< + WestendLocationV3, + AccountId, + Self::AssetKind, + PoolIdToAccountId, + >; type PoolAssetId = u32; type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam @@ -344,6 +353,18 @@ impl pallet_asset_conversion::Config for Runtime { >; } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed< + ::PoolId, + >; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = weights::pallet_asset_conversion_ops::WeightInfo; +} + parameter_types! { // we just reuse the same deposits pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); @@ -906,6 +927,10 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + + // TODO: the pallet instance should be removed once all pools have migrated + // to the new account IDs. + AssetConversionMigration: pallet_asset_conversion_ops = 200, } ); @@ -1085,6 +1110,7 @@ mod benches { [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] + [pallet_asset_conversion_ops, AssetConversionMigration] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 2f1fcfb05f39..4eebb1f8d786 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -19,6 +19,7 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_ops; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs new file mode 100644 index 000000000000..dfe4092c3f02 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-15, STEPS: `10`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-westend-dev +// --steps=10 +// --repeat=2 +// --pallet=pallet-asset-conversion-ops +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_ops`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_ops::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1105` + // Estimated: `7404` + // Minimum execution time: 2_216_000_000 picoseconds. + Weight::from_parts(2_379_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 360b1a7055b7..41e941ee9a2b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -78,8 +78,6 @@ parameter_types! 
{ PalletInstance(::index() as u8).into(); pub UniquesPalletLocation: Location = PalletInstance(::index() as u8).into(); - pub PoolAssetsPalletLocationV3: xcm::v3::Location = - xcm::v3::Junction::PalletInstance(::index() as u8).into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); pub StakingPot: AccountId = CollatorSelection::account_id(); pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); @@ -172,6 +170,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, + xcm::v3::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -603,7 +602,11 @@ impl xcm_executor::Config for XcmConfig { WeightToFee, crate::NativeAndAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation< + TrustBackedAssetsPalletLocation, + Balance, + xcm::v3::Location, + >, ForeignAssetsConvertedConcreteId, ), ResolveAssetTo, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 6696cb232239..b5957dd5df92 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -22,8 +22,8 @@ use asset_hub_westend_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, - TrustBackedAssetsPalletLocationV3, WestendLocation, WestendLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, WestendLocation, + XcmConfig, }, AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, @@ -53,17 +53,14 @@ use sp_runtime::traits::MaybeEquivalence; use std::{convert::Into, ops::Mul}; use testnet_parachains_constants::westend::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; -use xcm_builder::V4V3LocationConverter; +use xcm_builder::WithLatestLocationConverter; use xcm_executor::traits::{ConvertLocation, JustTry, WeightTrader}; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type AssetIdForTrustBackedAssetsConvertLatest = - assets_common::AssetIdForTrustBackedAssetsConvertLatest; + assets_common::AssetIdForTrustBackedAssetsConvert; type RuntimeHelper = asset_test_utils::RuntimeHelper; @@ -204,7 +201,7 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); let asset_1: u32 = 1; - let native_location = WestendLocationV3::get(); + let native_location = WestendLocation::get(); let asset_1_location = AssetIdForTrustBackedAssetsConvert::convert_back(&asset_1).unwrap(); // bob's initial balance for native and `asset1` assets. 
@@ -221,14 +218,24 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(asset_1_location) + Box::new( + xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") + ), + Box::new( + xcm::v3::Location::try_from(asset_1_location.clone()) + .expect("conversion works") + ) )); assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(asset_1_location), + Box::new( + xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") + ), + Box::new( + xcm::v3::Location::try_from(asset_1_location.clone()) + .expect("conversion works") + ), pool_liquidity, pool_liquidity, 1, @@ -240,8 +247,6 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let asset_total_issuance = Assets::total_issuance(asset_1); let native_total_issuance = Balances::total_issuance(); - let asset_1_location_latest: Location = asset_1_location.try_into().unwrap(); - // prepare input to buy weight. let weight = Weight::from_parts(4_000_000_000, 0); let fee = WeightToFee::weight_to_fee(&weight); @@ -249,7 +254,7 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { AssetConversion::get_amount_in(&fee, &pool_liquidity, &pool_liquidity).unwrap(); let extra_amount = 100; let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - let payment: Asset = (asset_1_location_latest.clone(), asset_fee + extra_amount).into(); + let payment: Asset = (asset_1_location.clone(), asset_fee + extra_amount).into(); // init trader and buy weight. let mut trader = ::Trader::new(); @@ -257,24 +262,25 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { trader.buy_weight(weight, payment.into(), &ctx).expect("Expected Ok"); // assert. - let unused_amount = unused_asset - .fungible - .get(&asset_1_location_latest.clone().into()) - .map_or(0, |a| *a); + let unused_amount = + unused_asset.fungible.get(&asset_1_location.clone().into()).map_or(0, |a| *a); assert_eq!(unused_amount, extra_amount); assert_eq!(Assets::total_issuance(asset_1), asset_total_issuance + asset_fee); // prepare input to refund weight. let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); - let (reserve1, reserve2) = - AssetConversion::get_reserves(native_location, asset_1_location).unwrap(); + let (reserve1, reserve2) = AssetConversion::get_reserves( + xcm::v3::Location::try_from(native_location).expect("conversion works"), + xcm::v3::Location::try_from(asset_1_location.clone()).expect("conversion works"), + ) + .unwrap(); let asset_refund = AssetConversion::get_amount_out(&refund, &reserve1, &reserve2).unwrap(); // refund. let actual_refund = trader.refund_weight(refund_weight, &ctx).unwrap(); - assert_eq!(actual_refund, (asset_1_location_latest, asset_refund).into()); + assert_eq!(actual_refund, (asset_1_location, asset_refund).into()); // assert. 
assert_eq!(Balances::balance(&staking_pot), initial_balance); @@ -303,7 +309,8 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { .execute_with(|| { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); - let native_location = WestendLocationV3::get(); + let native_location = + xcm::v3::Location::try_from(WestendLocation::get()).expect("conversion works"); let foreign_location = xcm::v3::Location { parents: 1, interior: ( @@ -435,7 +442,7 @@ fn test_asset_xcm_take_first_trader() { // get asset id as location let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&local_asset_id).unwrap(); + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -599,8 +606,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We are going to buy 4e9 weight let bought = Weight::from_parts(4_000_000_000u64, 0); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); // lets calculate amount needed let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -672,8 +678,7 @@ fn test_asset_xcm_take_first_trader_refund_not_possible_since_amount_less_than_e // We are going to buy small amount let bought = Weight::from_parts(500_000_000u64, 0); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -724,8 +729,7 @@ fn test_that_buying_ed_refund_does_not_refund_for_take_first_trader() { let bought = Weight::from_parts(500_000_000u64, 0); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let amount_bought = WeightToFee::weight_to_fee(&bought); @@ -801,8 +805,7 @@ fn test_asset_xcm_take_first_trader_not_possible_for_non_sufficient_assets() { // lets calculate amount needed let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - let asset_location = - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&1).unwrap(); + let asset_location = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); let asset: Asset = (asset_location, asset_amount_needed).into(); @@ -923,13 +926,16 @@ fn test_assets_balances_api_works() { ))); // check trusted asset assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvertLatest::convert_back(&local_asset_id).unwrap(), + AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), minimum_asset_balance ) .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - V4V3LocationConverter::convert_back(&foreign_asset_id_location).unwrap(), + WithLatestLocationConverter::::convert_back( + &foreign_asset_id_location + ) + .unwrap(), 6 * foreign_asset_minimum_asset_balance ) .into()))); @@ -1002,7 +1008,7 @@ asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ XcmConfig, TrustBackedAssetsInstance, AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvertLatest, + AssetIdForTrustBackedAssetsConvert, collator_session_keys(), ExistentialDeposit::get(), 12345, @@ -1043,7 +1049,7 @@ 
asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p ForeignCreatorsSovereignAccountOf, ForeignAssetsInstance, xcm::v3::Location, - V4V3LocationConverter, + WithLatestLocationConverter, collator_session_keys(), ExistentialDeposit::get(), AssetDeposit::get(), diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index fa2752179eb6..431b5766147a 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -26,36 +26,37 @@ pub mod runtime_api; use crate::matching::{LocalLocationPattern, ParentLocation}; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; +use sp_runtime::traits::TryConvertInto; +use xcm::latest::Location; use xcm_builder::{ - AsPrefixedGeneralIndex, MatchedConvertedConcreteId, StartsWith, V4V3LocationConverter, + AsPrefixedGeneralIndex, MatchedConvertedConcreteId, StartsWith, WithLatestLocationConverter, }; -use xcm_executor::traits::JustTry; /// `Location` vs `AssetIdForTrustBackedAssets` converter for `TrustBackedAssets` -pub type AssetIdForTrustBackedAssetsConvert = +pub type AssetIdForTrustBackedAssetsConvert = AsPrefixedGeneralIndex< TrustBackedAssetsPalletLocation, AssetIdForTrustBackedAssets, - JustTry, - xcm::v3::Location, + TryConvertInto, + L, >; -pub type AssetIdForTrustBackedAssetsConvertLatest = - AsPrefixedGeneralIndex; - /// `Location` vs `CollectionId` converter for `Uniques` pub type CollectionIdForUniquesConvert = - AsPrefixedGeneralIndex; + AsPrefixedGeneralIndex; /// [`MatchedConvertedConcreteId`] converter dedicated for `TrustBackedAssets` -pub type TrustBackedAssetsConvertedConcreteId = - MatchedConvertedConcreteId< - AssetIdForTrustBackedAssets, - Balance, - StartsWith, - AssetIdForTrustBackedAssetsConvertLatest, - JustTry, - >; +pub type TrustBackedAssetsConvertedConcreteId< + TrustBackedAssetsPalletLocation, + Balance, + L = Location, +> = MatchedConvertedConcreteId< + AssetIdForTrustBackedAssets, + Balance, + StartsWith, + AssetIdForTrustBackedAssetsConvert, + TryConvertInto, +>; /// [`MatchedConvertedConcreteId`] converter dedicated for `Uniques` pub type UniquesConvertedConcreteId = MatchedConvertedConcreteId< @@ -65,28 +66,26 @@ pub type UniquesConvertedConcreteId = MatchedConvertedCon // junction within the pallet itself. StartsWith, CollectionIdForUniquesConvert, - JustTry, + TryConvertInto, >; -/// [`MatchedConvertedConcreteId`] converter dedicated for storing `AssetId` as `Location`. 
-pub type LocationConvertedConcreteId = MatchedConvertedConcreteId< - xcm::v3::Location, +/// [`MatchedConvertedConcreteId`] converter dedicated for `TrustBackedAssets`, +/// it is a similar implementation to `TrustBackedAssetsConvertedConcreteId`, +/// but it converts `AssetId` to `xcm::v*::Location` type instead of `AssetIdForTrustBackedAssets = +/// u32` +pub type TrustBackedAssetsAsLocation< + TrustBackedAssetsPalletLocation, Balance, - LocationFilter, - V4V3LocationConverter, - JustTry, + L, + LocationConverter = WithLatestLocationConverter, +> = MatchedConvertedConcreteId< + L, + Balance, + StartsWith, + LocationConverter, + TryConvertInto, >; -/// [`MatchedConvertedConcreteId`] converter dedicated for `TrustBackedAssets` -pub type TrustBackedAssetsAsLocation = - MatchedConvertedConcreteId< - xcm::v3::Location, - Balance, - StartsWith, - V4V3LocationConverter, - JustTry, - >; - /// [`MatchedConvertedConcreteId`] converter dedicated for storing `ForeignAssets` with `AssetId` as /// `Location`. /// @@ -95,26 +94,34 @@ pub type TrustBackedAssetsAsLocation = /// - all local Locations /// /// `AdditionalLocationExclusionFilter` can customize additional excluded Locations -pub type ForeignAssetsConvertedConcreteId = - LocationConvertedConcreteId< - EverythingBut<( - // Excludes relay/parent chain currency - Equals, - // Here we rely on fact that something like this works: - // assert!(Location::new(1, - // [Parachain(100)]).starts_with(&Location::parent())); - // assert!([Parachain(100)].into().starts_with(&Here)); - StartsWith, - // Here we can exclude more stuff or leave it as `()` - AdditionalLocationExclusionFilter, - )>, - Balance, - >; +pub type ForeignAssetsConvertedConcreteId< + AdditionalLocationExclusionFilter, + Balance, + AssetId, + LocationToAssetIdConverter = WithLatestLocationConverter, + BalanceConverter = TryConvertInto, +> = MatchedConvertedConcreteId< + AssetId, + Balance, + EverythingBut<( + // Excludes relay/parent chain currency + Equals, + // Here we rely on fact that something like this works: + // assert!(Location::new(1, + // [Parachain(100)]).starts_with(&Location::parent())); + // assert!([Parachain(100)].into().starts_with(&Here)); + StartsWith, + // Here we can exclude more stuff or leave it as `()` + AdditionalLocationExclusionFilter, + )>, + LocationToAssetIdConverter, + BalanceConverter, +>; type AssetIdForPoolAssets = u32; /// `Location` vs `AssetIdForPoolAssets` converter for `PoolAssets`. 
pub type AssetIdForPoolAssetsConvert = - AsPrefixedGeneralIndex; + AsPrefixedGeneralIndex; /// [`MatchedConvertedConcreteId`] converter dedicated for `PoolAssets` pub type PoolAssetsConvertedConcreteId = MatchedConvertedConcreteId< @@ -122,7 +129,7 @@ pub type PoolAssetsConvertedConcreteId = Balance, StartsWith, AssetIdForPoolAssetsConvert, - JustTry, + TryConvertInto, >; #[cfg(test)] @@ -130,7 +137,7 @@ mod tests { use super::*; use sp_runtime::traits::MaybeEquivalence; use xcm::prelude::*; - use xcm_builder::StartsWithExplicitGlobalConsensus; + use xcm_builder::{StartsWithExplicitGlobalConsensus, WithLatestLocationConverter}; use xcm_executor::traits::{Error as MatchError, MatchesFungibles}; #[test] @@ -143,14 +150,14 @@ mod tests { Location::new(5, [PalletInstance(13), GeneralIndex(local_asset_id.into())]); assert_eq!( - AssetIdForTrustBackedAssetsConvertLatest::::convert_back( + AssetIdForTrustBackedAssetsConvert::::convert_back( &local_asset_id ) .unwrap(), expected_reverse_ref ); assert_eq!( - AssetIdForTrustBackedAssetsConvertLatest::::convert( + AssetIdForTrustBackedAssetsConvert::::convert( &expected_reverse_ref ) .unwrap(), @@ -163,7 +170,7 @@ mod tests { frame_support::parameter_types! { pub TrustBackedAssetsPalletLocation: Location = Location::new(0, [PalletInstance(13)]); } - // setup convert + // set up a converter type TrustBackedAssetsConvert = TrustBackedAssetsConvertedConcreteId; @@ -246,19 +253,21 @@ mod tests { } #[test] - fn location_converted_concrete_id_converter_works() { + fn foreign_assets_converted_concrete_id_converter_works() { frame_support::parameter_types! { pub Parachain100Pattern: Location = Location::new(1, [Parachain(100)]); pub UniversalLocationNetworkId: NetworkId = NetworkId::ByGenesis([9; 32]); } - // setup convert + // set up a converter which uses `xcm::v3::Location` under the hood type Convert = ForeignAssetsConvertedConcreteId< ( StartsWith, StartsWithExplicitGlobalConsensus, ), u128, + xcm::v3::Location, + WithLatestLocationConverter, >; let test_data = vec![ diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 478bba4565dc..3aad88e177ca 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -113,17 +113,14 @@ impl, Reserves: ContainsPair devolved, - Err(_) => { - log::trace!( - target: "xcm::contains", - "IsTrustedBridgedReserveLocationForConcreteAsset origin: {:?} is not remote to the universal_source: {:?}", - origin, universal_source - ); - return false - }, - }; + if ensure_is_remote(universal_source.clone(), origin.clone()).is_err() { + log::trace!( + target: "xcm::contains", + "IsTrustedBridgedReserveLocationForConcreteAsset origin: {:?} is not remote to the universal_source: {:?}", + origin, universal_source + ); + return false + } // check asset according to the configured reserve locations Reserves::contains(asset, origin) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 6dbf96edc2ab..8845f0538b5c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -29,6 +29,10 @@ use crate::{ use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ + 
extensions::refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, + }, messages, messages::{ source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, @@ -39,10 +43,6 @@ use bridge_runtime_common::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, - }, }; use frame_support::{parameter_types, traits::PalletInfoAccess}; @@ -273,7 +273,7 @@ mod tests { // Bulletin chain - it has the same (almost) runtime for Polkadot Bulletin and Rococo // Bulletin, so we have to adhere Polkadot names here - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< Runtime, WithRococoBulletinMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 5d55d7afbacf..e5a00073407f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -28,6 +28,10 @@ use crate::{ use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ + extensions::refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, RefundableParachain, + }, messages, messages::{ source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, @@ -38,10 +42,6 @@ use bridge_runtime_common::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, }; use codec::Encode; @@ -318,7 +318,7 @@ mod tests { }, }); - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubWestendMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index bce722aa5f87..d5da41cce286 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -25,6 +25,10 @@ use bp_messages::LaneId; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::Chain; use bridge_runtime_common::{ + extensions::refund_relayer_extension::{ + ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, RefundableParachain, + }, messages, messages::{ source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, @@ -35,10 +39,6 @@ use bridge_runtime_common::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, - 
refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, }; use codec::Encode; use frame_support::{ @@ -352,7 +352,7 @@ mod tests { }, }); - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubRococoMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 3816d2ed848e..94765287637b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -21,13 +21,16 @@ mod tracks; use crate::{ weights, xcm_config::{FellowshipAdminBodyId, LocationToAccountId, TreasurerBodyId, UsdtAssetHub}, - AccountId, AssetRate, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, + AccountId, AssetRate, Balance, Balances, FellowshipReferenda, GovernanceLocation, + ParachainInfo, Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, + WestendTreasuryAccount, DAYS, }; +use cumulus_primitives_core::ParaId; use frame_support::{ parameter_types, traits::{ - EitherOf, EitherOfDiverse, MapSuccess, NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, + tokens::UnityOrOuterConversion, EitherOf, EitherOfDiverse, FromContains, MapSuccess, + NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, }, PalletId, }; @@ -40,10 +43,10 @@ use pallet_ranked_collective::EnsureOfRank; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use parachains_common::impls::ToParentTreasury; use polkadot_runtime_common::impls::{ - LocatableAssetConverter, VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, VersionedLocatableAsset, VersionedLocationConverter, }; use sp_arithmetic::Permill; -use sp_core::{ConstU128, ConstU32}; +use sp_core::{ConstU128, ConstU32, ConstU8}; use sp_runtime::traits::{ConstU16, ConvertToValue, IdentityLookup, Replace, TakeFirst}; use testnet_parachains_constants::westend::{account, currency::GRAND}; use westend_runtime_constants::time::HOURS; @@ -263,6 +266,7 @@ parameter_types! { // The asset's interior location for the paying account. This is the Fellowship Treasury // pallet instance (which sits at index 65). 
pub FellowshipTreasuryInteriorLocation: InteriorLocation = PalletInstance(65).into(); + pub SelfParaId: ParaId = ParachainInfo::parachain_id(); } #[cfg(feature = "runtime-benchmarks")] @@ -345,7 +349,15 @@ impl pallet_treasury::Config for Runtime { type Paymaster = FellowshipTreasuryPaymaster; #[cfg(feature = "runtime-benchmarks")] type Paymaster = PayWithEnsure>>; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsSiblingSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = ConstU32<{ 30 * DAYS }>; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments< diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 171ac6a9528f..fcd786711bbe 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -71,7 +71,7 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = (); + type Migrations = (pallet_contracts::migration::v16::Migration,); type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 3863ea5022f9..46fcbc6319c9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -78,7 +78,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = FungibleAdapter< +pub type FungibleTransactor = FungibleAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: @@ -171,7 +171,7 @@ pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; type XcmSender = XcmRouter; - type AssetTransactor = CurrencyTransactor; + type AssetTransactor = FungibleTransactor; type OriginConverter = XcmOriginToTransactDispatchOrigin; type IsReserve = NativeAsset; type IsTeleporter = TrustedTeleporter; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 9959c15b1110..ad065ee34774 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -109,6 +109,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_broker::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -719,11 +720,20 @@ impl_runtime_apis! 
{ use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); fn reachable_dest() -> Option { Some(Parent.into()) @@ -741,8 +751,21 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // Reserve transfers are disabled - None + // Coretime chain can reserve transfer regions to some random parachain. + + // Properties of a mock region: + let core = 0; + let begin = 0; + let end = 42; + + let region_id = pallet_broker::Pallet::::issue(core, begin, end, None, None); + Some(( + Asset { + fun: NonFungible(Index(region_id.into())), + id: AssetId(xcm_config::BrokerPalletLocation::get()) + }, + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), + )) } fn get_asset() -> Asset { @@ -758,15 +781,25 @@ impl_runtime_apis! { RocRelayLocation::get(), ExistentialDeposit::get() ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { Ok(RocRelayLocation::get()) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index b0b276128272..7eab53b8a7cf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -77,7 +77,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = FungibleAdapter< +pub type FungibleTransactor = FungibleAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: @@ -106,7 +106,7 @@ pub type RegionTransactor = NonFungibleAdapter< >; /// Means for transacting assets on this chain. -pub type AssetTransactors = (CurrencyTransactor, RegionTransactor); +pub type AssetTransactors = (FungibleTransactor, RegionTransactor); /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, /// ready for dispatching a transaction with XCM's `Transact`. 
There is an `OriginKind` that can @@ -293,7 +293,7 @@ impl pallet_xcm::Config for Runtime { type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. + type XcmReserveTransferFilter = Everything; type Weigher = WeightInfoBounds< crate::weights::xcm::CoretimeRococoXcmWeight, RuntimeCall, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 1ee8c3bc0ec3..0f0742268618 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -109,6 +109,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_broker::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -218,6 +219,7 @@ impl pallet_authorship::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl pallet_balances::Config for Runtime { @@ -710,11 +712,20 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); fn reachable_dest() -> Option { Some(Parent.into()) @@ -732,8 +743,21 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // Reserve transfers are disabled - None + // Coretime chain can reserve transfer regions to some random parachain. + + // Properties of a mock region: + let core = 0; + let begin = 0; + let end = 42; + + let region_id = pallet_broker::Pallet::::issue(core, begin, end, None, None); + Some(( + Asset { + fun: NonFungible(Index(region_id.into())), + id: AssetId(xcm_config::BrokerPalletLocation::get()) + }, + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), + )) } fn get_asset() -> Asset { @@ -753,11 +777,22 @@ impl_runtime_apis! 
{ impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { Ok(TokenRelayLocation::get()) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 346bdfa4d8c9..e1452ec63f20 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -300,7 +300,7 @@ impl pallet_xcm::Config for Runtime { type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. + type XcmReserveTransferFilter = Everything; type Weigher = WeightInfoBounds< crate::weights::xcm::CoretimeWestendXcmWeight, RuntimeCall, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 919bfe83e7d7..89885d77378b 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -72,18 +72,14 @@ use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm_config::XcmOriginToTransactDispatchOrigin; +use xcm_config::{ForeignAssetsAssetId, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -// Polkadot imports +use parachains_common::{AccountId, Signature}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -// XCM Imports -use parachains_common::{AccountId, Signature}; use xcm::latest::prelude::{AssetId as AssetLocationId, BodyId}; /// Balance of an account. 
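Stepping back to the coretime runtime changes above: with `XcmReserveTransferFilter` switched from `Nothing` to `Everything`, a broker region can now leave the coretime chain as a reserve-transferred, non-fungible XCM asset. The condensed sketch below mirrors the new `reserve_transferable_asset_and_dest` benchmark helper; the region bounds (core 0, timeslices 0 to 42) and the destination para id 2000 are arbitrary illustrative values, while `Runtime`, `xcm_config` and `BrokerPalletLocation` are the coretime runtime's existing items.

```rust
use xcm::latest::prelude::*;

// Sketch of how a region is expressed as a reserve-transferable XCM asset.
fn example_region_transfer_args() -> (Asset, Location) {
    // Mint a region on the broker pallet; `issue` hands back its `RegionId`.
    let region_id = pallet_broker::Pallet::<Runtime>::issue(0, 0, 42, None, None);

    // A region travels over XCM as a non-fungible instance of the broker
    // pallet's asset, indexed by the encoded `RegionId`.
    let region_asset = Asset {
        id: AssetId(xcm_config::BrokerPalletLocation::get()),
        fun: NonFungible(Index(region_id.into())),
    };

    // Destination: a sibling parachain, addressed up through the relay chain.
    let dest: Location = ParentThen(Parachain(2000).into()).into();

    (region_asset, dest)
}
```

Such an asset/destination pair is what the benchmarks feed into the reserve-transfer path that the relaxed filter now permits.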
@@ -474,8 +470,8 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v3::Location; - type AssetIdParameter = xcm::v3::Location; + type AssetId = ForeignAssetsAssetId; + type AssetIdParameter = ForeignAssetsAssetId; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index c12372abbe90..6832e2f4f440 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -40,7 +40,7 @@ use pallet_xcm::XcmPassthrough; use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; -use sp_runtime::traits::{AccountIdConversion, ConvertInto}; +use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -58,9 +58,16 @@ parameter_types! { pub const RelayLocation: Location = Location::parent(); // Local native currency which is stored in `pallet_balances`` pub const PenpalNativeCurrency: Location = Location::here(); - pub const RelayNetwork: Option = None; + // The Penpal runtime is utilized for testing with various environment setups. + // This storage item allows us to customize the `NetworkId` where Penpal is deployed. + // By default, it is set to `NetworkId::Rococo` and can be changed using `System::set_storage`. + pub storage RelayNetworkId: NetworkId = NetworkId::Westend; + pub RelayNetwork: Option = Some(RelayNetworkId::get()); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub UniversalLocation: InteriorLocation = [ + GlobalConsensus(RelayNetworkId::get()), + Parachain(ParachainInfo::parachain_id().into()) + ].into(); pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); } @@ -77,7 +84,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting assets on this chain. -pub type CurrencyTransactor = FungibleAdapter< +pub type FungibleTransactor = FungibleAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: @@ -124,7 +131,11 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -pub type ForeignAssetsConvertedConcreteId = assets_common::LocationConvertedConcreteId< +// Using the latest `Location`, we don't need to worry about migrations for Penpal. +pub type ForeignAssetsAssetId = Location; +pub type ForeignAssetsConvertedConcreteId = xcm_builder::MatchedConvertedConcreteId< + Location, + Balance, EverythingBut<( // Here we rely on fact that something like this works: // assert!(Location::new(1, @@ -132,7 +143,8 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::LocationConvertedConc // assert!([Parachain(100)].into().starts_with(&Here)); StartsWith, )>, - Balance, + Identity, + TryConvertInto, >; /// Means for transacting foreign assets from different global consensus. 
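Because Penpal's `ForeignAssetsConvertedConcreteId` now keys foreign assets by the latest `Location` type directly, with `Identity` for the asset id and `TryConvertInto` for the balance, a matched asset id is simply the asset's own location. Below is a rough sketch of the matcher's behaviour, assuming Penpal's `Balance` of `u128`, the `MatchesFungibles` trait from `xcm-executor` that `FungiblesAdapter` drives, Penpal's `xcm_config` items in scope, and an arbitrary asset location from another global consensus.

```rust
use xcm::latest::prelude::*;
use xcm_executor::traits::MatchesFungibles;

#[test]
fn foreign_asset_location_is_the_asset_id() {
    // A fungible asset whose id is a location from a different global consensus
    // (all concrete values are arbitrary and for illustration only).
    let bridged_location = Location::new(2, [GlobalConsensus(NetworkId::Rococo), Parachain(2000)]);
    let bridged_asset: Asset = (bridged_location.clone(), 100_000_000u128).into();

    // `Identity` keeps the location as the asset id; `TryConvertInto` converts the
    // `Fungible` amount into the runtime's balance type.
    let (id, amount) =
        <ForeignAssetsConvertedConcreteId as MatchesFungibles<Location, u128>>::matches_fungibles(
            &bridged_asset,
        )
        .expect("a non-local fungible location should match");
    assert_eq!(id, bridged_location);
    assert_eq!(amount, 100_000_000u128);
}
```

Local locations are still rejected by the `EverythingBut` filter, so only genuinely foreign assets end up in the `ForeignAssets` pallet instance.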
@@ -152,7 +164,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< >; /// Means for transacting assets on this chain. -pub type AssetTransactors = (CurrencyTransactor, ForeignFungiblesTransactor, FungiblesTransactor); +pub type AssetTransactors = (FungibleTransactor, ForeignFungiblesTransactor, FungiblesTransactor); /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, /// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can @@ -409,8 +421,8 @@ impl cumulus_pallet_xcm::Config for Runtime { /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v3::Location { - xcm::v3::Location::new(1, [xcm::v3::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> ForeignAssetsAssetId { + Location::new(1, [Parachain(id)]) } } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 154c2c460004..df335368be1c 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -346,7 +346,7 @@ pub type LocationToAccountId = ( ); /// Means for transacting assets on this chain. -pub type CurrencyTransactor = FungibleAdapter< +pub type FungibleTransactor = FungibleAdapter< // Use this currency: Balances, // Use this currency when it is a fungible asset matching the given location or name: @@ -385,7 +385,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; /// Means for transacting assets on this chain. -pub type AssetTransactors = (CurrencyTransactor, FungiblesTransactor); +pub type AssetTransactors = (FungibleTransactor, FungiblesTransactor); /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, /// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs index fec6e144e40f..f7d2fd0f0be3 100644 --- a/cumulus/polkadot-parachain/src/cli.rs +++ b/cumulus/polkadot-parachain/src/cli.rs @@ -55,11 +55,6 @@ pub enum Subcommand { /// The pallet benchmarking moved to the `pallet` sub-command. #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - - /// Try-runtime has migrated to a standalone - /// [CLI](). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after January 2024. 
- TryRuntime, } const AFTER_HELP_EXAMPLE: &str = color_print::cstr!( diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index a897c7c534da..041187de488f 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -549,9 +549,8 @@ pub fn run() -> Result<()> { }, Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| { - construct_partials!(config, |partials| cmd.run(partials.client)) - }) + runner + .sync_run(|config| construct_partials!(config, |partials| cmd.run(partials.client))) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; @@ -601,7 +600,6 @@ pub fn run() -> Result<()> { _ => Err("Benchmarking sub-command unsupported".into()), } }, - Some(Subcommand::TryRuntime) => Err("The `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. Please remove this subcommand from your runtime and use the standalone CLI.".into()), Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), None => { let runner = cli.create_runner(&cli.run.normalize())?; @@ -702,7 +700,7 @@ async fn start_node>( hwbench: Option, ) -> Result { match config.chain_spec.runtime()? { - Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::< + Runtime::AssetHubPolkadot => crate::service::start_asset_hub_lookahead_node::< AssetHubPolkadotRuntimeApi, AssetHubPolkadotAuraId, Network, @@ -711,16 +709,7 @@ async fn start_node>( .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubKusama => crate::service::start_asset_hub_node::< - RuntimeApi, - AuraId, - Network, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::AssetHubRococo | Runtime::AssetHubWestend => + Runtime::AssetHubRococo | Runtime::AssetHubWestend | Runtime::AssetHubKusama => crate::service::start_asset_hub_lookahead_node::( config, polkadot_config, @@ -732,18 +721,7 @@ async fn start_node>( .map(|r| r.0) .map_err(Into::into), - Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::CollectivesWestend => + Runtime::CollectivesWestend | Runtime::CollectivesPolkadot => crate::service::start_generic_aura_lookahead_node::( config, polkadot_config, @@ -779,39 +757,12 @@ async fn start_node>( Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal => - crate::service::start_generic_aura_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), + chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal => - crate::service::start_generic_aura_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), + chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => - 
crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), + chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment | chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 2dd3541e85f4..12eda3e8a9cb 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -17,10 +17,7 @@ use codec::{Codec, Decode}; use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; -use cumulus_client_consensus_aura::collators::{ - basic::{self as basic_aura, Params as BasicAuraParams}, - lookahead::{self as aura, Params as AuraParams}, -}; +use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; use cumulus_client_consensus_common::{ ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, }; @@ -687,82 +684,6 @@ where Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) } -/// Start an aura powered parachain node. Some system chains use this. -pub async fn start_generic_aura_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, - }; - - let fut = - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - /// Uses the lookahead collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. @@ -787,142 +708,6 @@ pub async fn start_generic_aura_lookahead_node> .await } -/// Start a shell node which should later transition into an Aura powered parachain node. 
Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -pub async fn start_asset_hub_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, - Net: NetworkBackend, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. - let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; - - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } - } - - let proposer = Proposer::new(proposer_factory); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface2, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. 
- authoring_duration: Duration::from_millis(500), - collation_request_receiver: Some(request_stream), - }; - - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) - .await - }); - - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); - - Ok(()) - }, - hwbench, - ) - .await -} - /// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub /// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and /// needs to sync and upgrade before it can run `AuraApi` functions. diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 4bfb73acda05..938f5cc45a11 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v2023-11-07-rococo-westend-initial-relayer +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.2.1 # metadata ARG VCS_REF diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md index af0ede5107a6..0c8165af40f4 100644 --- a/docs/contributor/prdoc.md +++ b/docs/contributor/prdoc.md @@ -1,55 +1,31 @@ # PRDoc -## Intro - -With the merge of [PR #1946](https://github.com/paritytech/polkadot-sdk/pull/1946), a new method for -documenting changes has been introduced: `prdoc`. The [prdoc repository](https://github.com/paritytech/prdoc) -contains more documentation and tooling. - -The current document describes how to quickly get started authoring `PRDoc` files. +A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to +record changes on a crate level. This information is then processed by the release team to apply the correct crate +version bumps and to generate the CHANGELOG of the next release. ## Requirements -When creating a PR, the author needs to decides with the `R0` label whether the change (PR) should -appear in the release notes or not. - -Labelling a PR with `R0` means that no `PRDoc` is required. - -A PR without the `R0` label **does** require a valid `PRDoc` file to be introduced in the PR. - -## PRDoc how-to - -A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). - -For significant changes, a `.prdoc` file is mandatory and the file must meet the following -requirements: -- file named `pr_NNNN.prdoc` where `NNNN` is the PR number. - For convenience, those file can also contain a short description: `pr_NNNN_foobar.prdoc`. -- located under the [`prdoc` folder](https://github.com/paritytech/polkadot-sdk/tree/master/prdoc) of the repository -- compliant with the [JSON schema](https://json-schema.org/) defined in `prdoc/schema_user.json` - -Those requirements can be fulfilled manually without any tooling but a text editor. 
- -## Tooling - -Users might find the following helpers convenient: -- Setup VSCode to be aware of the prdoc schema: see [using VSCode](https://github.com/paritytech/prdoc#using-vscode) -- Using the `prdoc` cli to: - - generate a `PRDoc` file from a [template defined in the Polkadot SDK - repo](https://github.com/paritytech/polkadot-sdk/blob/master/prdoc/.template.prdoc) simply providing a PR number - - check the validity of one or more `PRDoc` files +When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The +`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping +the [CODEOWNERS](../../.github/CODEOWNERS) for advice. -## `prdoc` cli usage +## PRDoc How-To -The `prdoc` cli documentation can be found at https://github.com/paritytech/prdoc#prdoc +A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one: -tldr: -- `prdoc generate ` -- `prdoc check -n ` +1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install prdoc`. +1. Open a Pull Request and get the PR number. +1. Generate the file with `prdoc generate `. The output filename will be printed. +1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example +[VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas). +1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections. +1. Check your prdoc with `prdoc check -n `. This is optional since the CI will also check it. -where is the PR number. +> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file: +> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc` -## Pick an audience +## Pick An Audience While describing a PR, the author needs to consider which audience(s) need to be addressed. The list of valid audiences is described and documented in the JSON schema as follow: @@ -65,7 +41,41 @@ The list of valid audiences is described and documented in the JSON schema as fo - `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain. -## Tips +If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and +re-phrase the changes for each audience. + +## Record SemVer Changes + +All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells +the release team how to bump the crate version prior to the next release. It is very important that this information is +correct, otherwise it could break the code of downstream teams. + +The bump can either be `major`, `minor`, `patch` or `none`. The three first options are defined by +[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other +applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs +are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc +about them. + +> **Note**: There is currently no CI in place to sanity check this information, but should be added soon. 
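As a rough illustration of what these bump levels mean in Rust terms (hypothetical crate code, not taken from this repository): adding a new public item is backwards compatible and warrants a `minor` bump, while changing an existing public signature breaks downstream callers and warrants a `major` bump.

```rust
// Hypothetical `frame-example` crate, shown at three versions (illustrative only).
mod v1_0 {
    /// The existing public API.
    pub fn transfer(from: u64, to: u64, amount: u128) -> Result<(), ()> {
        let _ = (from, to, amount);
        Ok(())
    }
}

mod v1_1 {
    // `minor`: a new public item is added; every caller of `v1_0` still compiles.
    pub use super::v1_0::transfer;

    pub fn transfer_all(from: u64, to: u64) -> Result<(), ()> {
        let _ = (from, to);
        Ok(())
    }
}

mod v2_0 {
    // `major`: an existing public signature changed, so callers written against
    // `transfer(from, to, amount)` no longer compile.
    pub fn transfer(from: u64, to: u64, amount: u128, keep_alive: bool) -> Result<(), ()> {
        let _ = (from, to, amount, keep_alive);
        Ok(())
    }
}
```

A `patch` bump covers changes such as bug fixes or internal refactoring that leave the public API surface intact.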
+ +### Example + +For example when you modified two crates and record the changes: + +```yaml +crates: +- name: frame-example + bump: major +- name: frame-example-pallet + bump: minor +``` + +It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using +`frame-example` might break. + +### Dependencies -The PRDoc schema is defined in each repo and usually is quite restrictive. -You cannot simply add a new property to a `PRDoc` file unless the Schema allows it. +A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to +bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change. +`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest +compatible version. diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index b0c22c5a97fe..719d00490a9d 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -33,7 +33,6 @@ sp-io = { path = "../../substrate/primitives/io" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-maybe-compressed-blob = { path = "../../substrate/primitives/maybe-compressed-blob" } frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli", optional = true } -try-runtime-cli = { path = "../../substrate/utils/frame/try-runtime/cli", optional = true } sc-cli = { path = "../../substrate/client/cli", optional = true } sc-service = { path = "../../substrate/client/service", optional = true } polkadot-node-metrics = { path = "../node/metrics" } @@ -57,7 +56,6 @@ cli = [ "sc-service", "sc-tracing", "service", - "try-runtime-cli", ] runtime-benchmarks = [ "frame-benchmarking-cli?/runtime-benchmarks", @@ -70,7 +68,6 @@ full-node = ["service/full-node"] try-runtime = [ "service/try-runtime", "sp-runtime/try-runtime", - "try-runtime-cli/try-runtime", ] fast-runtime = ["service/fast-runtime"] pyroscope = ["pyro", "pyroscope_pprofrs"] diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 74e190444693..3737942e6e53 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -50,11 +50,6 @@ pub enum Subcommand { #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try-runtime has migrated to a standalone CLI - /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after January 2024. - TryRuntime, - /// Key management CLI utilities #[command(subcommand)] Key(sc_cli::KeySubcommand), diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index b163a2c1367d..6af93a756388 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -476,13 +476,6 @@ pub fn run() -> Result<()> { } }, Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), - #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.to_owned().into()), - #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ - You can enable it with `--features try-runtime`." - .to_owned() - .into()), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; Ok(runner.sync_run(|config| cmd.run::(&config))?) 
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 60ea1cf5ff48..374f090a2671 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -206,9 +206,12 @@ async fn handle_new_activations( // follow the procedure from the guide: // https://paritytech.github.io/polkadot-sdk/book/node/collators/collation-generation.html + // If there is no collation function provided, bail out early. + // Important: Lookahead collator and slot based collator do not use `CollatorFn`. if config.collator.is_none() { return Ok(()) } + let para_id = config.para_id; let _overall_timer = metrics.time_new_activations(); @@ -232,9 +235,14 @@ async fn handle_new_activations( // The loop bellow will fill in cores that the para is allowed to build on. let mut cores_to_build_on = Vec::new(); + // This assumption refers to all cores of the parachain, taking elastic scaling + // into account. + let mut para_assumption = None; for (core_idx, core) in availability_cores.into_iter().enumerate() { - let scheduled_core = match core { - CoreState::Scheduled(scheduled_core) => scheduled_core, + // This nested assumption refers only to the core being iterated. + let (core_assumption, scheduled_core) = match core { + CoreState::Scheduled(scheduled_core) => + (OccupiedCoreAssumption::Free, scheduled_core), CoreState::Occupied(occupied_core) => match async_backing_params { Some(params) if params.max_candidate_depth >= 1 => { // maximum candidate depth when building on top of a block @@ -257,7 +265,7 @@ async fn handle_new_activations( }; match res { - Some(res) => res, + Some(res) => (OccupiedCoreAssumption::Included, res), None => continue, } }, @@ -291,6 +299,10 @@ async fn handle_new_activations( "core is not assigned to our para. Keep going.", ); } else { + // This does not work for elastic scaling, but it should be enough for single + // core parachains. If async backing runtime is available we later override + // the assumption based on the `para_backing_state` API response. + para_assumption = Some(core_assumption); // Accumulate cores for building collation(s) outside the loop. cores_to_build_on.push(CoreIndex(core_idx as u32)); } @@ -301,34 +313,43 @@ async fn handle_new_activations( continue } - let para_backing_state = - request_para_backing_state(relay_parent, config.para_id, ctx.sender()) - .await - .await?? - .ok_or(crate::error::Error::MissingParaBackingState)?; - - // We are being very optimistic here, but one of the cores could pend availability some more - // block, ore even time out. - // For timeout assumption the collator can't really know because it doesn't receive bitfield - // gossip. - let assumption = if para_backing_state.pending_availability.is_empty() { - OccupiedCoreAssumption::Free - } else { - OccupiedCoreAssumption::Included - }; + // If at least one core is assigned to us, `para_assumption` is `Some`. + let Some(mut para_assumption) = para_assumption else { continue }; + + // If it is none it means that neither async backing or elastic scaling (which + // depends on it) are supported. We'll use the `para_assumption` we got from + // iterating cores. + if async_backing_params.is_some() { + // We are being very optimistic here, but one of the cores could pend availability some + // more block, ore even time out. + // For timeout assumption the collator can't really know because it doesn't receive + // bitfield gossip. 
+ let para_backing_state = + request_para_backing_state(relay_parent, config.para_id, ctx.sender()) + .await + .await?? + .ok_or(crate::error::Error::MissingParaBackingState)?; + + // Override the assumption about the para's assigned cores. + para_assumption = if para_backing_state.pending_availability.is_empty() { + OccupiedCoreAssumption::Free + } else { + OccupiedCoreAssumption::Included + } + } gum::debug!( target: LOG_TARGET, relay_parent = ?relay_parent, - our_para = %config.para_id, - ?assumption, + our_para = %para_id, + ?para_assumption, "Occupied core(s) assumption", ); let mut validation_data = match request_persisted_validation_data( relay_parent, - config.para_id, - assumption, + para_id, + para_assumption, ctx.sender(), ) .await @@ -339,7 +360,7 @@ async fn handle_new_activations( gum::debug!( target: LOG_TARGET, relay_parent = ?relay_parent, - our_para = %config.para_id, + our_para = %para_id, "validation data is not available", ); continue @@ -348,8 +369,8 @@ async fn handle_new_activations( let validation_code_hash = match obtain_validation_code_hash_with_assumption( relay_parent, - config.para_id, - assumption, + para_id, + para_assumption, ctx.sender(), ) .await? @@ -359,7 +380,7 @@ async fn handle_new_activations( gum::debug!( target: LOG_TARGET, relay_parent = ?relay_parent, - our_para = %config.para_id, + our_para = %para_id, "validation code hash is not found.", ); continue diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 1ec2cccfae71..10c391cba25d 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -807,6 +807,56 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run OccupiedCoreAssumption::Included, 1, pending_availability, + runtime_version, + ) + .await; + + virtual_overseer + }); +} + +#[test] +fn distribute_collation_for_occupied_core_pre_async_backing() { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + let total_cores = 3; + + // Use runtime version before async backing + let runtime_version = RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT - 1; + + let cores = (0..total_cores) + .into_iter() + .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) + .collect::>(); + + let claim_queue = cores + .iter() + .enumerate() + .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .collect::>(); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + runtime_version, + claim_queue, + ) + .await; + + helpers::handle_cores_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` + OccupiedCoreAssumption::Free, + total_cores, + vec![], + runtime_version, ) .await; @@ -826,6 +876,8 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti ) { let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); + // Using latest runtime with the fancy claim queue exposed. 
+ let runtime_version = RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT; let cores = (0..3) .into_iter() @@ -863,8 +915,7 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti activated_hash, AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, cores, - // Using latest runtime with the fancy claim queue exposed. - RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + runtime_version, claim_queue, ) .await; @@ -882,6 +933,7 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti }, total_cores, pending_availability, + runtime_version, ) .await; @@ -901,6 +953,8 @@ fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_sc ) { let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); + // Using latest runtime with the fancy claim queue exposed. + let runtime_version = RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT; let cores = (0..total_cores) .into_iter() @@ -921,8 +975,7 @@ fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_sc activated_hash, AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, cores, - // Using latest runtime with the fancy claim queue exposed. - RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + runtime_version, claim_queue, ) .await; @@ -935,6 +988,7 @@ fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_sc OccupiedCoreAssumption::Free, total_cores, vec![], + runtime_version, ) .await; @@ -1074,6 +1128,13 @@ mod helpers { } ); + let async_backing_response = + if runtime_version >= RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT { + Ok(async_backing_params) + } else { + Err(RuntimeApiError::NotSupported { runtime_api_name: "async_backing_params" }) + }; + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -1083,7 +1144,7 @@ mod helpers { ), )) => { assert_eq!(hash, activated_hash); - let _ = tx.send(Ok(async_backing_params)); + let _ = tx.send(async_backing_response); } ); @@ -1121,6 +1182,7 @@ mod helpers { expected_occupied_core_assumption: OccupiedCoreAssumption, cores_assigned: usize, pending_availability: Vec, + runtime_version: u32, ) { // Expect no messages if no cores is assigned to the para if cores_assigned == 0 { @@ -1138,14 +1200,16 @@ mod helpers { max_pov_size: 1024, }; - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) - ) if parent == activated_hash && p_id == para_id => { - tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); - } - ); + if runtime_version >= RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) + ) if parent == activated_hash && p_id == para_id => { + tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); + } + ); + } assert_matches!( overseer_recv(virtual_overseer).await, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs similarity index 50% rename from polkadot/node/core/prospective-parachains/src/fragment_tree.rs rename to polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs index 8061dc82d835..86814b976d13 100644 --- 
a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs @@ -86,6 +86,9 @@ //! will still perform fairly well under these conditions, despite being somewhat wasteful of //! memory. +#[cfg(test)] +mod tests; + use std::{ borrow::Cow, collections::{ @@ -1145,1447 +1148,3 @@ impl FragmentNode { self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) } } - -#[cfg(test)] -mod tests { - use super::*; - use assert_matches::assert_matches; - use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; - use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; - use polkadot_primitives_test_helpers as test_helpers; - use rstest::rstest; - use std::iter; - - impl NodePointer { - fn unwrap_idx(self) -> usize { - match self { - NodePointer::Root => panic!("Unexpected root"), - NodePointer::Storage(index) => index, - } - } - } - - fn make_constraints( - min_relay_parent_number: BlockNumber, - valid_watermarks: Vec, - required_parent: HeadData, - ) -> Constraints { - Constraints { - min_relay_parent_number, - max_pov_size: 1_000_000, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - max_ump_num_per_candidate: 10, - dmp_remaining_messages: [0; 10].into(), - hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, - hrmp_channels_out: HashMap::new(), - max_hrmp_num_per_candidate: 0, - required_parent, - validation_code_hash: Hash::repeat_byte(42).into(), - upgrade_restriction: None, - future_validation_code: None, - } - } - - fn make_committed_candidate( - para_id: ParaId, - relay_parent: Hash, - relay_parent_number: BlockNumber, - parent_head: HeadData, - para_head: HeadData, - hrmp_watermark: BlockNumber, - ) -> (PersistedValidationData, CommittedCandidateReceipt) { - let persisted_validation_data = PersistedValidationData { - parent_head, - relay_parent_number, - relay_parent_storage_root: Hash::repeat_byte(69), - max_pov_size: 1_000_000, - }; - - let candidate = CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id, - relay_parent, - collator: test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: test_helpers::dummy_collator_signature(), - para_head: para_head.hash(), - validation_code_hash: Hash::repeat_byte(42).into(), - }, - commitments: CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: para_head, - processed_downward_messages: 1, - hrmp_watermark, - }, - }; - - (persisted_validation_data, candidate) - } - - #[test] - fn scope_rejects_ancestors_that_skip_blocks() { - let para_id = ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 10, - hash: Hash::repeat_byte(10), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![RelayChainBlockInfo { - number: 8, - hash: Hash::repeat_byte(8), - storage_root: Hash::repeat_byte(69), - }]; - - let max_depth = 2; - let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors - ), - Err(UnexpectedAncestor { number: 8, prev: 10 }) - ); - } - - #[test] - fn scope_rejects_ancestor_for_0_block() { - let para_id 
= ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 0, - hash: Hash::repeat_byte(0), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![RelayChainBlockInfo { - number: 99999, - hash: Hash::repeat_byte(99), - storage_root: Hash::repeat_byte(69), - }]; - - let max_depth = 2; - let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors, - ), - Err(UnexpectedAncestor { number: 99999, prev: 0 }) - ); - } - - #[test] - fn scope_only_takes_ancestors_up_to_min() { - let para_id = ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 5, - hash: Hash::repeat_byte(0), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![ - RelayChainBlockInfo { - number: 4, - hash: Hash::repeat_byte(4), - storage_root: Hash::repeat_byte(69), - }, - RelayChainBlockInfo { - number: 3, - hash: Hash::repeat_byte(3), - storage_root: Hash::repeat_byte(69), - }, - RelayChainBlockInfo { - number: 2, - hash: Hash::repeat_byte(2), - storage_root: Hash::repeat_byte(69), - }, - ]; - - let max_depth = 2; - let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - let scope = Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors, - ) - .unwrap(); - - assert_eq!(scope.ancestors.len(), 2); - assert_eq!(scope.ancestors_by_hash.len(), 2); - } - - #[test] - fn storage_add_candidate() { - let mut storage = CandidateStorage::new(); - let relay_parent = Hash::repeat_byte(69); - - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - relay_parent, - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, - ); - - let candidate_hash = candidate.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); - assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - - assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); - } - - #[test] - fn storage_retain() { - let mut storage = CandidateStorage::new(); - - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - Hash::repeat_byte(69), - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, - ); - - let candidate_hash = candidate.hash(); - let output_head_hash = candidate.commitments.head_data.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); - storage.retain(|_| true); - assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - assert!(storage.head_data_by_hash(&output_head_hash).is_some()); - - storage.retain(|_| false); - assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); - assert!(storage.head_data_by_hash(&output_head_hash).is_none()); - } - - // [`FragmentTree::populate`] should pick up candidates that build on other candidates. 
- #[test] - fn populate_works_recursively() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert!(candidates.contains(&candidate_a_hash)); - assert!(candidates.contains(&candidate_b_hash)); - - assert_eq!(tree.nodes.len(), 2); - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[0].depth, 0); - - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[1].depth, 1); - } - - #[test] - fn children_of_root_are_contiguous() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - let (pvd_a2, candidate_a2) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b, 1].into(), - 0, - ); - let candidate_a2_hash = candidate_a2.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_a2, pvd_a2).unwrap(); - tree.add_and_populate(candidate_a2_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - 
assert_eq!(candidates.len(), 3); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); - } - - #[test] - fn add_candidate_child_of_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - } - - #[test] - fn add_candidate_child_of_non_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - } - - #[test] - fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 10; - - // Empty tree - let storage = CandidateStorage::new(); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - - let relay_parent_info = - RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - 
max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - assert_eq!(tree.candidates().collect::>().len(), 0); - assert_eq!(tree.nodes.len(), 0); - - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); - // Invalid candidate. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); - assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); - } - - #[rstest] - #[case(true, 13)] - #[case(false, 8)] - // The tree with no cycles looks like: - // Make a tree that looks like this (note that there's no cycle): - // +-(root)-+ - // | | - // +----0---+ 7 - // | | - // 1----+ 5 - // | | - // | | - // 2 6 - // | - // 3 - // | - // 4 - // - // The tree with cycles is the same as the first but has a cycle from 4 back to the state - // produced by 0 (It's bounded by the max_depth + 1). - // +-(root)-+ - // | | - // +----0---+ 7 - // | | - // 1----+ 5 - // | | - // | | - // 2 6 - // | - // 3 - // | - // 4---+ - // | | - // 1 5 - // | - // 2 - // | - // 3 - fn test_find_ancestor_path_and_find_backable_chain( - #[case] has_cycle: bool, - #[case] expected_node_count: usize, - ) { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 7; - let relay_parent_number = 0; - let relay_parent_storage_root = Hash::repeat_byte(69); - - let mut candidates = vec![]; - - // Candidate 0 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![0].into(), - 0, - )); - // Candidate 1 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![1].into(), - 0, - )); - // Candidate 2 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![2].into(), - 0, - )); - // Candidate 3 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![2].into(), - vec![3].into(), - 0, - )); - // Candidate 4 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![4].into(), - 0, - )); - // Candidate 5 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![5].into(), - 0, - )); - // Candidate 6 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![6].into(), - 0, - )); - // Candidate 7 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![7].into(), - 0, - )); - - if has_cycle { - candidates[4] = make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![0].into(), // put the cycle here back to the output state of 0. 
- 0, - ); - } - - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::new(); - - let relay_parent_info = RelayChainBlockInfo { - number: relay_parent_number, - hash: relay_parent, - storage_root: relay_parent_storage_root, - }; - - for (pvd, candidate) in candidates.iter() { - storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); - } - let candidates = - candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!(tree.candidates().collect::>().len(), candidates.len()); - assert_eq!(tree.nodes.len(), expected_node_count); - - // Do some common tests on both trees. - { - // No ancestors supplied. - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // Ancestor which is not part of the tree. Will be ignored. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // A chain fork. - let ancestors: Ancestors = - [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - // Ancestors which are part of the tree but don't form a path. Will be ignored. - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors. 
- let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[7].hash()); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = - [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[0].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - if has_cycle { - assert_eq!( - tree.find_backable_chain(ancestors, 2, |_| true), - [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } else { - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - assert_eq!(res, NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Requested count is 0. - assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); - } - - // Now do some tests only on the tree with cycles - if has_cycle { - // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at - // 0-1-2-3-4-1-2-3. - let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - ] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[4].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // 0-1-2. 
- let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 1, |_| true), - [3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - tree.find_backable_chain(ancestors, 5, |_| true), - [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // 0-1 - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 6, |_| true), - [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), - ); - - // For 0-1-2-3-4-5, there's more than 1 way of finding this path in - // the tree. `None` should be returned. The runtime should not have accepted this. - let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - candidates[5].hash(), - ] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()); - assert_eq!(res, None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - } - } - - #[test] - fn graceful_cycle_of_0() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0a].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 1); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); - - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - - for count in 1..10 { - assert_eq!( - tree.find_backable_chain(Ancestors::new(), count, |_| true), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize, max_depth + 1)) - .collect::>() - ); - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash].into_iter().collect(), - count - 
1, - |_| true - ), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize - 1, max_depth)) - .collect::>() - ); - } - } - - #[test] - fn graceful_cycle_of_1() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0a].into(), // input same as output - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); - - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - - assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 2, |_| true), - vec![candidate_a_hash, candidate_b_hash], - ); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 3, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - assert_eq!( - tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), - vec![candidate_b_hash, candidate_a_hash], - ); - - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 6, |_| true), - vec![ - candidate_a_hash, - candidate_b_hash, - candidate_a_hash, - candidate_b_hash, - candidate_a_hash - ], - ); - - for count in 3..7 { - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash, candidate_b_hash].into_iter().collect(), - count, - |_| true - ), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - } - } - - #[test] - fn hypothetical_depths_known_and_unknown() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - 
vec![0x0a].into(), // input same as output - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0, 2, 4], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_b_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![1, 3], - ); - - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(21)), - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0, 2, 4], - ); - - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(22)), - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![1, 3] - ); - } - - #[test] - fn hypothetical_depths_stricter_on_complete() { - let storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 1000, // watermark is illegal - ); - - let candidate_a_hash = candidate_a.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0], - ); - - assert!(tree - .hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Complete { - receipt: Cow::Owned(candidate_a), - persisted_validation_data: Cow::Owned(pvd_a), - }, - &storage, - false, - ) - .is_empty()); - } - - #[test] - fn hypothetical_depths_backed_in_path() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - 
vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0d].into(), - 0, - ); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.add_candidate(candidate_c, pvd_c).unwrap(); - - // `A` and `B` are backed, `C` is not. - storage.mark_backed(&candidate_a_hash); - storage.mark_backed(&candidate_b_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); - assert_eq!(tree.nodes.len(), 3); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![0], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![2], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - Vec::::new(), - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![2], // non-empty if `false`. - ); - } - - #[test] - fn pending_availability_in_scope() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_c = Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - // Note that relay parent `a` is not allowed. 
- let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let pending_availability = vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number + 1, - hash: relay_parent_c, - storage_root: Hash::zero(), - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.mark_backed(&candidate_a_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info, - base_constraints, - pending_availability, - max_depth, - vec![relay_parent_b_info], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), 2); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_c, - }, - &storage, - false, - ), - vec![1], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_b, - }, - &storage, - false, - ), - vec![2], - ); - } -} diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs new file mode 100644 index 000000000000..fd41be55f7f9 --- /dev/null +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs @@ -0,0 +1,1451 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use super::*; +use assert_matches::assert_matches; +use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; +use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; +use polkadot_primitives_test_helpers as test_helpers; +use rstest::rstest; +use std::iter; + +impl NodePointer { + fn unwrap_idx(self) -> usize { + match self { + NodePointer::Root => panic!("Unexpected root"), + NodePointer::Storage(index) => index, + } + } +} + +fn make_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec, + required_parent: HeadData, +) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + max_ump_num_per_candidate: 10, + dmp_remaining_messages: [0; 10].into(), + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash: Hash::repeat_byte(42).into(), + upgrade_restriction: None, + future_validation_code: None, + } +} + +fn make_committed_candidate( + para_id: ParaId, + relay_parent: Hash, + relay_parent_number: BlockNumber, + parent_head: HeadData, + para_head: HeadData, + hrmp_watermark: BlockNumber, +) -> (PersistedValidationData, CommittedCandidateReceipt) { + let persisted_validation_data = PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id, + relay_parent, + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + }, + commitments: CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: para_head, + processed_downward_messages: 1, + hrmp_watermark, + }, + }; + + (persisted_validation_data, candidate) +} + +#[test] +fn scope_rejects_ancestors_that_skip_blocks() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 10, + hash: Hash::repeat_byte(10), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 8, + hash: Hash::repeat_byte(8), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors + ), + Err(UnexpectedAncestor { number: 8, prev: 10 }) + ); +} + +#[test] +fn scope_rejects_ancestor_for_0_block() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 0, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 99999, + hash: Hash::repeat_byte(99), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + 
relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor { number: 99999, prev: 0 }) + ); +} + +#[test] +fn scope_only_takes_ancestors_up_to_min() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ) + .unwrap(); + + assert_eq!(scope.ancestors.len(), 2); + assert_eq!(scope.ancestors_by_hash.len(), 2); +} + +#[test] +fn storage_add_candidate() { + let mut storage = CandidateStorage::new(); + let relay_parent = Hash::repeat_byte(69); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + relay_parent, + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); +} + +#[test] +fn storage_retain() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let output_head_hash = candidate.commitments.head_data.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + storage.retain(|_| true); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + assert!(storage.head_data_by_hash(&output_head_hash).is_some()); + + storage.retain(|_| false); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + assert!(storage.head_data_by_hash(&output_head_hash).is_none()); +} + +// [`FragmentTree::populate`] should pick up candidates that build on other candidates. 
+#[test] +fn populate_works_recursively() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert!(candidates.contains(&candidate_a_hash)); + assert!(candidates.contains(&candidate_b_hash)); + + assert_eq!(tree.nodes.len(), 2); + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[0].depth, 0); + + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[1].depth, 1); +} + +#[test] +fn children_of_root_are_contiguous() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b, 1].into(), + 0, + ); + let candidate_a2_hash = candidate_a2.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_a2, pvd_a2).unwrap(); + tree.add_and_populate(candidate_a2_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + 
assert_eq!(candidates.len(), 3); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); +} + +#[test] +fn add_candidate_child_of_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); +} + +#[test] +fn add_candidate_child_of_non_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); +} + +#[test] +fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; + + // Empty tree + let storage = CandidateStorage::new(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + 
vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + assert_eq!(tree.candidates().collect::>().len(), 0); + assert_eq!(tree.nodes.len(), 0); + + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + // Invalid candidate. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); + assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); +} + +#[rstest] +#[case(true, 13)] +#[case(false, 8)] +// The tree with no cycles looks like: +// Make a tree that looks like this (note that there's no cycle): +// +-(root)-+ +// | | +// +----0---+ 7 +// | | +// 1----+ 5 +// | | +// | | +// 2 6 +// | +// 3 +// | +// 4 +// +// The tree with cycles is the same as the first but has a cycle from 4 back to the state +// produced by 0 (It's bounded by the max_depth + 1). +// +-(root)-+ +// | | +// +----0---+ 7 +// | | +// 1----+ 5 +// | | +// | | +// 2 6 +// | +// 3 +// | +// 4---+ +// | | +// 1 5 +// | +// 2 +// | +// 3 +fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, +) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. 
+ 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. + let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. 
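+ // Candidate 7 is a leaf of the tree: the ancestor path resolves to its node, but no further candidates can be chained onto it.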
+ let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. 
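+ // With ancestors 0-1-2 the path resolves to candidate 2; requesting more than one candidate continues with 3 and then wraps around the cycle (4, 1, 2, ...).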
+ let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } +} + +#[test] +fn graceful_cycle_of_0() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 1); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + + for count in 1..10 { + assert_eq!( + tree.find_backable_chain(Ancestors::new(), count, |_| true), + iter::repeat(candidate_a_hash) + .take(std::cmp::min(count as usize, max_depth + 1)) + .collect::>() + ); + assert_eq!( + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), count - 1, |_| 
true), + iter::repeat(candidate_a_hash) + .take(std::cmp::min(count as usize - 1, max_depth)) + .collect::>() + ); + } +} + +#[test] +fn graceful_cycle_of_1() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 2, |_| true), + vec![candidate_a_hash, candidate_b_hash], + ); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 3, |_| true), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + assert_eq!( + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), + vec![candidate_b_hash, candidate_a_hash], + ); + + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 6, |_| true), + vec![ + candidate_a_hash, + candidate_b_hash, + candidate_a_hash, + candidate_b_hash, + candidate_a_hash + ], + ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } +} + +#[test] +fn hypothetical_depths_known_and_unknown() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input 
same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_b_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![1, 3], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(21)), + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(22)), + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![1, 3] + ); +} + +#[test] +fn hypothetical_depths_stricter_on_complete() { + let storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 1000, // watermark is illegal + ); + + let candidate_a_hash = candidate_a.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0], + ); + + assert!(tree + .hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Complete { + receipt: Cow::Owned(candidate_a), + persisted_validation_data: Cow::Owned(pvd_a), + }, + &storage, + false, + ) + .is_empty()); +} + +#[test] +fn hypothetical_depths_backed_in_path() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + 
vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0d].into(), + 0, + ); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_c, pvd_c).unwrap(); + + // `A` and `B` are backed, `C` is not. + storage.mark_backed(&candidate_a_hash); + storage.mark_backed(&candidate_b_hash); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + assert_eq!(tree.nodes.len(), 3); + + let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![0], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![2], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + Vec::::new(), + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![2], // non-empty if `false`. + ); +} + +#[test] +fn pending_availability_in_scope() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_c = Hash::repeat_byte(3); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + // Note that relay parent `a` is not allowed. 
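+ // Its block number (0) is lower than the scope's minimum relay parent number (1) below, so candidate `a` is only brought into scope via the pending availability list.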
+ let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + let pending_availability = vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + let relay_parent_c_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number + 1, + hash: relay_parent_c, + storage_root: Hash::zero(), + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.mark_backed(&candidate_a_hash); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info, + base_constraints, + pending_availability, + max_depth, + vec![relay_parent_b_info], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), 2); + + let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_c, + }, + &storage, + false, + ), + vec![1], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + relay_parent: relay_parent_b, + }, + &storage, + false, + ), + vec![2], + ); +} diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index acdb256ab36c..7cd1f7ce7281 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -48,6 +48,7 @@ pub(crate) struct RequestResultCache { validation_code: LruMap<(Hash, ParaId, OccupiedCoreAssumption), Option>, validation_code_by_hash: LruMap>, candidate_pending_availability: LruMap<(Hash, ParaId), Option>, + candidates_pending_availability: LruMap<(Hash, ParaId), Vec>, candidate_events: LruMap>, session_executor_params: LruMap>, session_info: LruMap, @@ -86,6 +87,7 @@ impl Default for RequestResultCache { validation_code: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), validation_code_by_hash: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), candidate_pending_availability: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + candidates_pending_availability: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), candidate_events: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), session_executor_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), session_info: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), @@ -261,6 +263,21 @@ impl RequestResultCache { self.candidate_pending_availability.insert(key, value); } + pub(crate) fn candidates_pending_availability( + &mut self, + key: (Hash, ParaId), + ) -> Option<&Vec> { + self.candidates_pending_availability.get(&key).map(|v| &*v) + } + + pub(crate) fn cache_candidates_pending_availability( + &mut self, + key: (Hash, ParaId), + value: Vec, + ) { + self.candidates_pending_availability.insert(key, value); + } + pub(crate) fn candidate_events(&mut self, relay_parent: &Hash) -> Option<&Vec> { self.candidate_events.get(relay_parent).map(|v| &*v) } @@ -591,4 +608,5 
@@ pub(crate) enum RequestResult { AsyncBackingParams(Hash, async_backing::AsyncBackingParams), NodeFeatures(SessionIndex, NodeFeatures), ClaimQueue(Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>), + CandidatesPendingAvailability(Hash, ParaId, Vec<CommittedCandidateReceipt>), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 2b7f6fc2d609..b7995aeeee76 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -133,6 +133,9 @@ where CandidatePendingAvailability(relay_parent, para_id, candidate) => self .requests_cache .cache_candidate_pending_availability((relay_parent, para_id), candidate), + CandidatesPendingAvailability(relay_parent, para_id, candidates) => self + .requests_cache + .cache_candidates_pending_availability((relay_parent, para_id), candidates), CandidateEvents(relay_parent, events) => self.requests_cache.cache_candidate_events(relay_parent, events), SessionExecutorParams(_relay_parent, session_index, index) => @@ -252,6 +255,9 @@ where Request::CandidatePendingAvailability(para, sender) => query!(candidate_pending_availability(para), sender) .map(|sender| Request::CandidatePendingAvailability(para, sender)), + Request::CandidatesPendingAvailability(para, sender) => + query!(candidates_pending_availability(para), sender) + .map(|sender| Request::CandidatesPendingAvailability(para, sender)), Request::CandidateEvents(sender) => query!(candidate_events(), sender).map(|sender| Request::CandidateEvents(sender)), Request::SessionExecutorParams(session_index, sender) => { @@ -531,6 +537,12 @@ where ver = 1, sender ), + Request::CandidatesPendingAvailability(para, sender) => query!( + CandidatesPendingAvailability, + candidates_pending_availability(para), + ver = Request::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT, + sender + ), Request::CandidateEvents(sender) => { query!(CandidateEvents, candidate_events(), ver = 1, sender) }, diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 73c661c40762..0113de83c89e 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -47,6 +47,7 @@ struct MockSubsystemClient { validation_outputs_results: HashMap<ParaId, bool>, session_index_for_child: SessionIndex, candidate_pending_availability: HashMap<ParaId, CommittedCandidateReceipt>, + candidates_pending_availability: HashMap<ParaId, Vec<CommittedCandidateReceipt>>, dmq_contents: HashMap<ParaId, Vec<InboundDownwardMessage>>, hrmp_channels: HashMap<ParaId, HashMap<ParaId, Vec<InboundHrmpMessage>>>, validation_code_by_hash: HashMap<ValidationCodeHash, ValidationCode>, @@ -140,6 +141,14 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { Ok(self.candidate_pending_availability.get(&para_id).cloned()) } + async fn candidates_pending_availability( + &self, + _: Hash, + para_id: ParaId, + ) -> Result<Vec<CommittedCandidateReceipt>, ApiError> { + Ok(self.candidates_pending_availability.get(&para_id).cloned().unwrap_or_default()) + } + async fn candidate_events(&self, _: Hash) -> Result<Vec<CandidateEvent>, ApiError> { Ok(self.candidate_events.clone()) } diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index c33674a8f2f9..0d4f4f49e31f 100644 --- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -31,7 +31,7 @@ use polkadot_subsystem_bench::{ }; use std::io::Write; -const BENCH_COUNT: usize = 50; +const BENCH_COUNT: usize = 5; fn main() -> Result<(), String> { let mut 
messages = vec![]; @@ -40,6 +40,8 @@ fn main() -> Result<(), String> { config.n_cores = 10; config.n_validators = 500; config.num_blocks = 3; + config.connectivity = 100; + config.latency = None; config.generate_pov_sizes(); let state = TestState::new(&config); diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index 46a38516898f..9be147bda93a 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -32,7 +32,7 @@ use polkadot_subsystem_bench::{ }; use std::io::Write; -const BENCH_COUNT: usize = 50; +const BENCH_COUNT: usize = 5; fn main() -> Result<(), String> { let mut messages = vec![]; @@ -40,6 +40,8 @@ fn main() -> Result<(), String> { let options = DataAvailabilityReadOptions { fetch_from_backers: true }; let mut config = TestConfiguration::default(); config.num_blocks = 3; + config.connectivity = 100; + config.latency = None; config.generate_pov_sizes(); let state = TestState::new(&config); diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index 87cdc389cb35..c3f45314b246 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -442,7 +442,7 @@ impl ClusterTracker { target: LOG_TARGET, pending_statements = ?self.pending, ?parent_hash, - "Cluster has too many pending statements, something wrong with our connection to our group peers \n + "Cluster has too many pending statements, something wrong with our connection to our group peers Restart might be needed if validator gets 0 backing rewards for more than 3-4 consecutive sessions" ); } diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index 2e659716766e..19eefd328994 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -16,7 +16,9 @@ "/dns/boot.gatotech.network/tcp/33400/p2p/12D3KooWEvz5Ygv3MhCUNTVQbUTVhzhvf4KKcNoe5M5YbVLPBeeW", "/dns/boot.gatotech.network/tcp/35400/wss/p2p/12D3KooWEvz5Ygv3MhCUNTVQbUTVhzhvf4KKcNoe5M5YbVLPBeeW", "/dns/paseo-bootnode.turboflakes.io/tcp/30630/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", - "/dns/paseo-bootnode.turboflakes.io/tcp/30730/wss/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e" + "/dns/paseo-bootnode.turboflakes.io/tcp/30730/wss/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", + "/dns/paseo-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr", + "/dns/paseo-boot-ng.dwellir.com/tcp/30354/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr" ], "telemetryEndpoints": null, "protocolId": "pas", @@ -419,4 +421,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 2ca6728af012..e75d80395c4b 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -670,7 +670,7 @@ pub enum RuntimeApiRequest { /// Get validation code by its hash, either past, current or future code can be returned, as /// long as state is still available. 
ValidationCodeByHash(ValidationCodeHash, RuntimeApiSender>), - /// Get a the candidate pending availability for a particular parachain by parachain / core + /// Get the candidate pending availability for a particular parachain by parachain / core /// index CandidatePendingAvailability(ParaId, RuntimeApiSender>), /// Get all events concerning candidates (backing, inclusion, time-out) in the parent of @@ -739,6 +739,9 @@ pub enum RuntimeApiRequest { /// Fetch the `ClaimQueue` from scheduler pallet /// `V11` ClaimQueue(RuntimeApiSender>>), + /// Get the candidates pending availability for a particular parachain + /// `V11` + CandidatesPendingAvailability(ParaId, RuntimeApiSender>), } impl RuntimeApiRequest { @@ -776,6 +779,9 @@ impl RuntimeApiRequest { /// `ClaimQueue` pub const CLAIM_QUEUE_RUNTIME_REQUIREMENT: u32 = 11; + + /// `candidates_pending_availability` + pub const CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT: u32 = 11; } /// A message to the Runtime API subsystem. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 664d10ed1af5..e5e1e4d24ef9 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -333,6 +333,14 @@ pub trait RuntimeApiSubsystemClient { // == v11: Claim queue == /// Fetch the `ClaimQueue` from scheduler pallet async fn claim_queue(&self, at: Hash) -> Result>, ApiError>; + + // == v11: Elastic scaling support == + /// Get the receipts of all candidates pending availability for a `ParaId`. + async fn candidates_pending_availability( + &self, + at: Hash, + para_id: Id, + ) -> Result>, ApiError>; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -428,6 +436,14 @@ where self.client.runtime_api().candidate_pending_availability(at, para_id) } + async fn candidates_pending_availability( + &self, + at: Hash, + para_id: Id, + ) -> Result>, ApiError> { + self.client.runtime_api().candidates_pending_availability(at, para_id) + } + async fn candidate_events(&self, at: Hash) -> Result>, ApiError> { self.client.runtime_api().candidate_events(at) } diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index 83b046f0bf0a..b93818070a18 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -296,6 +296,7 @@ specialize_requests! { fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; fn request_validation_code_by_hash(validation_code_hash: ValidationCodeHash) -> Option; ValidationCodeByHash; fn request_candidate_pending_availability(para_id: ParaId) -> Option; CandidatePendingAvailability; + fn request_candidates_pending_availability(para_id: ParaId) -> Vec; CandidatesPendingAvailability; fn request_candidate_events() -> Vec; CandidateEvents; fn request_session_info(index: SessionIndex) -> Option; SessionInfo; fn request_validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index f611936f2701..7bd92be35c15 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -288,5 +288,10 @@ sp_api::decl_runtime_apis! 
{ /// Claim queue #[api_version(11)] fn claim_queue() -> BTreeMap<CoreIndex, VecDeque<ParaId>>; + + /***** Added in v11 *****/ + /// Elastic scaling support + #[api_version(11)] + fn candidates_pending_availability(para_id: ppp::Id) -> Vec<CommittedCandidateReceipt<H>>; } } diff --git a/polkadot/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md b/polkadot/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md index e118757d83ce..b9f03748d89b 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md +++ b/polkadot/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md @@ -4,5 +4,8 @@ Get the receipt of a candidate pending availability. This returns `Some` for any `availability_cores` and `None` otherwise. ```rust +// Deprecated. fn candidate_pending_availability(at: Block, ParaId) -> Option<CommittedCandidateReceipt>; +// Use this instead: fn candidates_pending_availability(at: Block, ParaId) -> Vec<CommittedCandidateReceipt>; ``` diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index fd74f33253b7..0700a781d426 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -154,6 +154,8 @@ All failed checks should lead to an unrecoverable error making the block invalid where the changes to the state are expected to be discarded directly after. * `candidate_pending_availability(ParaId) -> Option<CommittedCandidateReceipt>`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any. +* `candidates_pending_availability(ParaId) -> Vec<CommittedCandidateReceipt>`: returns the `CommittedCandidateReceipt`s + pending availability for the para provided, if any. * `pending_availability(ParaId) -> Option<CandidatePendingAvailability>`: returns the metadata around the candidate pending availability for the para, if any. * `free_disputed(disputed: Vec<CandidateHash>) -> Vec<CoreIndex>`: Sweeps through all paras pending availability. If @@ -164,10 +166,10 @@ These functions were formerly part of the UMP pallet: * `check_upward_messages(P: ParaId, Vec<UpwardMessage>)`: 1. Checks that the parachain is not currently offboarding and error otherwise. - 1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages to be enqueued. - 1. Checks that no message exceeds `config.max_upward_message_size`. - 1. Checks that the total resulting queue size would not exceed `co`. - 1. Verify that queuing up the messages could not result in exceeding the queue's footprint according to the config + 2. Checks that there are at most `config.max_upward_message_num_per_candidate` messages to be enqueued. + 3. Checks that no message exceeds `config.max_upward_message_size`. + 4. Checks that the total resulting queue size would not exceed `co`. + 5. Verify that queuing up the messages could not result in exceeding the queue's footprint according to the config items `config.max_upward_queue_count` and `config.max_upward_queue_size`. The queue's current footprint is provided in `well_known_keys` in order to facilitate oraclisation on to the para. 
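For illustration only (not part of the diff): a minimal sketch of how node-side code could consume the new `CandidatesPendingAvailability` request through the `request_candidates_pending_availability` helper registered in `polkadot-node-subsystem-util` above. The crate paths and the generated signature are assumptions based on the usual `specialize_requests!` convention (relay parent, arguments, sender); the function name `fetch_pending_candidates` is purely illustrative.

```rust
use polkadot_node_subsystem::{messages::RuntimeApiMessage, overseer};
use polkadot_node_subsystem_util::request_candidates_pending_availability;
use polkadot_primitives::{CommittedCandidateReceipt, Hash, Id as ParaId};

/// Fetch all candidates currently pending availability for `para_id` at `relay_parent`.
/// With elastic scaling a para may have several such candidates, hence the `Vec`.
async fn fetch_pending_candidates(
    relay_parent: Hash,
    para_id: ParaId,
    sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
) -> Vec<CommittedCandidateReceipt> {
    // Send the runtime API request; the helper returns a oneshot receiver.
    let receiver = request_candidates_pending_availability(relay_parent, para_id, sender).await;
    // Treat a cancelled channel or a runtime API error as "no candidates" here;
    // real code would likely log or propagate the error instead.
    receiver.await.ok().and_then(Result::ok).unwrap_or_default()
}
```

On runtimes that do not yet expose API version 11 (`CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT`), callers can still fall back to the deprecated single-candidate `candidate_pending_availability` request.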
diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index cc1243790c2e..85531e9c04fc 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -19,7 +19,7 @@ use frame_support::traits::{ fungible::{Balanced, Credit}, tokens::imbalance::ResolveTo, - Imbalance, OnUnbalanced, + Contains, ContainsPair, Imbalance, OnUnbalanced, }; use pallet_treasury::TreasuryAccountId; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -156,6 +156,26 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation } } +/// Adapter for the [`Contains`] trait that matches a [`VersionedLocatableAsset`] converted to the +/// latest version of itself, checking its location and asset id against the `C` parameter type. +pub struct ContainsParts<C>(core::marker::PhantomData<C>); +impl<C> Contains<VersionedLocatableAsset> for ContainsParts<C> +where + C: ContainsPair<xcm::latest::Location, xcm::latest::AssetId>, +{ + fn contains(asset: &VersionedLocatableAsset) -> bool { + use VersionedLocatableAsset::*; + let (location, asset_id) = match asset.clone() { + V3 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { + (Ok(l), Ok(a)) => (l, a), + _ => return false, + }, + V4 { location, asset_id } => (location, asset_id), + }; + C::contains(&location, &asset_id.0) + } +} + #[cfg(feature = "runtime-benchmarks")] pub mod benchmarks { use super::VersionedLocatableAsset; diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 05a540aef828..65652b38577b 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -65,6 +65,7 @@ pub trait WeightInfo { fn force_open_hrmp_channel(c: u32) -> Weight; fn establish_system_channel() -> Weight; fn poke_channel_deposits() -> Weight; + fn establish_channel_with_system() -> Weight; } /// A weight info that is only suitable for testing. @@ -104,6 +105,9 @@ impl WeightInfo for TestWeightInfo { fn poke_channel_deposits() -> Weight { Weight::MAX } + fn establish_channel_with_system() -> Weight { + Weight::MAX + } } /// A description of a request to open an HRMP channel. @@ -270,6 +274,10 @@ pub mod pallet { /// implementation should be the same as `Balance` as used in the `Configuration`. type Currency: ReservableCurrency<Self::AccountId>; + /// The default channel size and capacity to use when opening a channel to a system + /// parachain. + type DefaultChannelSizeAndCapacityWithSystem: Get<(u32, u32)>; + /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; } @@ -297,7 +305,7 @@ pub mod pallet { proposed_max_capacity: u32, proposed_max_message_size: u32, }, - /// An HRMP channel was opened between two system chains. + /// An HRMP channel was opened with a system chain. HrmpSystemChannelOpened { sender: ParaId, recipient: ParaId, @@ -836,6 +844,50 @@ pub mod pallet { Ok(()) } + + /// Establish a bidirectional HRMP channel between a parachain and a system chain. + /// + /// Arguments: + /// + /// - `target_system_chain`: A system chain, `ParaId`. + /// + /// The origin needs to be the parachain origin. 
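+ /// Fees are waived (`Pays::No`) if the channel is successfully established.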
+ #[pallet::call_index(10)] + #[pallet::weight(::WeightInfo::establish_channel_with_system())] + pub fn establish_channel_with_system( + origin: OriginFor, + target_system_chain: ParaId, + ) -> DispatchResultWithPostInfo { + let sender = ensure_parachain(::RuntimeOrigin::from(origin))?; + + ensure!(target_system_chain.is_system(), Error::::ChannelCreationNotAuthorized); + + let (max_message_size, max_capacity) = + T::DefaultChannelSizeAndCapacityWithSystem::get(); + + // create bidirectional channel + Self::init_open_channel(sender, target_system_chain, max_capacity, max_message_size)?; + Self::accept_open_channel(target_system_chain, sender)?; + + Self::init_open_channel(target_system_chain, sender, max_capacity, max_message_size)?; + Self::accept_open_channel(sender, target_system_chain)?; + + Self::deposit_event(Event::HrmpSystemChannelOpened { + sender, + recipient: target_system_chain, + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + }); + + Self::deposit_event(Event::HrmpSystemChannelOpened { + sender: target_system_chain, + recipient: sender, + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + }); + + Ok(Pays::No.into()) + } } } diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs index 13f4cdfe3eea..8bf444e647e2 100644 --- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs +++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs @@ -50,6 +50,13 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } +fn assert_has_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + + assert!(events.iter().any(|record| record.event == system_event)); +} + /// Enumerates the phase in the setup process of a channel between two parachains. enum ParachainSetupStep { /// A channel open has been requested @@ -517,6 +524,43 @@ mod benchmarks { ); } + #[benchmark] + fn establish_channel_with_system() { + let sender_id = 1u32; + let recipient_id: ParaId = 2u32.into(); + + let sender_origin: crate::Origin = sender_id.into(); + + // make sure para is registered, and has zero balance. 
+ register_parachain_with_balance::(sender_id.into(), Zero::zero()); + register_parachain_with_balance::(recipient_id, Zero::zero()); + + #[extrinsic_call] + _(sender_origin, recipient_id); + + let (max_message_size, max_capacity) = T::DefaultChannelSizeAndCapacityWithSystem::get(); + + assert_has_event::( + Event::::HrmpSystemChannelOpened { + sender: sender_id.into(), + recipient: recipient_id, + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + } + .into(), + ); + + assert_has_event::( + Event::::HrmpSystemChannelOpened { + sender: recipient_id, + recipient: sender_id.into(), + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + } + .into(), + ); + } + impl_benchmark_test_suite!( Hrmp, crate::mock::new_test_ext(crate::hrmp::tests::GenesisConfigBuilder::default().build()), diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 8d43b866bc19..2f767ab7e1b1 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -27,7 +27,7 @@ use crate::{ }, shared, }; -use frame_support::{assert_noop, assert_ok}; +use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use primitives::BlockNumber; use std::collections::BTreeMap; @@ -935,3 +935,72 @@ fn watermark_maxed_out_at_relay_parent() { Hrmp::assert_storage_consistency_exhaustive(); }); } + +#[test] +fn establish_channel_with_system_works() { + let para_a = 2000.into(); + let para_a_origin: crate::Origin = 2000.into(); + let para_b = 3.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + Hrmp::establish_channel_with_system(para_a_origin.into(), para_b).unwrap(); + Hrmp::assert_storage_consistency_exhaustive(); + assert!(System::events().iter().any(|record| record.event == + MockEvent::Hrmp(Event::HrmpSystemChannelOpened { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 1, + proposed_max_message_size: 4 + }))); + + assert!(System::events().iter().any(|record| record.event == + MockEvent::Hrmp(Event::HrmpSystemChannelOpened { + sender: para_b, + recipient: para_a, + proposed_max_capacity: 1, + proposed_max_message_size: 4 + }))); + + // Advance to a block 6, but without session change. That means that the channel has + // not been created yet. + run_to_block(6, None); + assert!(!channel_exists(para_a, para_b)); + assert!(!channel_exists(para_b, para_a)); + Hrmp::assert_storage_consistency_exhaustive(); + + // Now let the session change happen and thus open the channel. + run_to_block(8, Some(vec![8])); + assert!(channel_exists(para_a, para_b)); + assert!(channel_exists(para_b, para_a)); + Hrmp::assert_storage_consistency_exhaustive(); + }); +} + +#[test] +fn establish_channel_with_system_with_invalid_args() { + let para_a = 2001.into(); + let para_a_origin: crate::Origin = 2000.into(); + let para_b = 2003.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. 
+ register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + assert_noop!( + Hrmp::establish_channel_with_system(RuntimeOrigin::signed(1), para_b), + BadOrigin + ); + assert_noop!( + Hrmp::establish_channel_with_system(para_a_origin.into(), para_b), + Error::::ChannelCreationNotAuthorized + ); + Hrmp::assert_storage_consistency_exhaustive(); + }); +} diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 9d60bbb23b6f..903d01aa5c9c 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -1104,6 +1104,24 @@ impl Pallet { }) } + /// Returns all the `CommittedCandidateReceipt` pending availability for the para provided, if + /// any. + pub(crate) fn candidates_pending_availability( + para: ParaId, + ) -> Vec> { + >::get(¶) + .map(|candidates| { + candidates + .into_iter() + .map(|candidate| CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments: candidate.commitments.clone(), + }) + .collect() + }) + .unwrap_or_default() + } + /// Returns the metadata around the first candidate pending availability for the /// para provided, if any. pub(crate) fn pending_availability( diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 461b9f4b431a..c91f5be127cd 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -248,6 +248,7 @@ impl crate::dmp::Config for Test {} parameter_types! { pub const FirstMessageFactorPercent: u64 = 100; + pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (4, 1); } impl crate::hrmp::Config for Test { @@ -255,6 +256,7 @@ impl crate::hrmp::Config for Test { type RuntimeEvent = RuntimeEvent; type ChannelManager = frame_system::EnsureRoot; type Currency = pallet_balances::Pallet; + type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem; type WeightInfo = crate::hrmp::TestWeightInfo; } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 39d4f520994a..3dca38050a0a 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -301,6 +301,10 @@ pub fn validation_code( } /// Implementation for the `candidate_pending_availability` function of the runtime API. +#[deprecated( + note = "`candidate_pending_availability` will be removed. Use `candidates_pending_availability` to query + all candidates pending availability" +)] pub fn candidate_pending_availability( para_id: ParaId, ) -> Option> { diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 046f6041f0b8..32bbdca84a3c 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,12 +16,24 @@ //! Put implementations of functions from staging APIs here. 
-use crate::scheduler; -use primitives::{CoreIndex, Id as ParaId}; -use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; +use crate::{inclusion, initializer, scheduler}; +use primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; +use sp_runtime::traits::One; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec::Vec, +}; /// Returns the claimqueue from the scheduler pub fn claim_queue() -> BTreeMap> { + let now = >::block_number() + One::one(); + + // This explicit update is only strictly required for session boundaries: + // + // At the end of a session we clear the claim queues: Without this update call, nothing would be + // scheduled to the client. + >::free_cores_and_fill_claimqueue(Vec::new(), now); + scheduler::ClaimQueue::::get() .into_iter() .map(|(core_index, entries)| { @@ -29,3 +41,11 @@ pub fn claim_queue() -> BTreeMap( + para_id: ParaId, +) -> Vec> { + >::candidates_pending_availability(para_id) +} diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 20a914fb8085..bbe19310f970 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -70,6 +70,7 @@ pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default- pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } pallet-nis = { path = "../../../substrate/frame/nis", default-features = false } pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } +pallet-parameters = { path = "../../../substrate/frame/parameters", default-features = false } pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } pallet-ranked-collective = { path = "../../../substrate/frame/ranked-collective", default-features = false } @@ -164,6 +165,7 @@ std = [ "pallet-multisig/std", "pallet-nis/std", "pallet-offences/std", + "pallet-parameters/std", "pallet-preimage/std", "pallet-proxy/std", "pallet-ranked-collective/std", @@ -239,6 +241,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-nis/runtime-benchmarks", "pallet-offences/runtime-benchmarks", + "pallet-parameters/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-ranked-collective/runtime-benchmarks", @@ -294,6 +297,7 @@ try-runtime = [ "pallet-multisig/try-runtime", "pallet-nis/try-runtime", "pallet-offences/try-runtime", + "pallet-parameters/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", "pallet-ranked-collective/try-runtime", diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 894d7fac2f0a..ba80fa6942c7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -25,6 +25,10 @@ use beefy_primitives::{ ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, mmr::{BeefyDataProvider, MmrLeafVersion}, }; +use frame_support::{ + dynamic_params::{dynamic_pallet_params, dynamic_params}, + traits::FromContains, +}; use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ @@ -39,7 +43,8 @@ use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ - LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, 
VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, traits::{Leaser, OnSwap}, @@ -73,9 +78,10 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut, - InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, - ProcessMessageError, StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, tokens::UnityOrOuterConversion, Contains, EitherOf, + EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, EverythingBut, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, + StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -85,7 +91,7 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; -use sp_core::{ConstU128, OpaqueMetadata, H256}; +use sp_core::{ConstU128, ConstU8, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ @@ -234,6 +240,72 @@ impl PrivilegeCmp for OriginPrivilegeCmp { } } +/// Dynamic params that can be adjusted at runtime. +#[dynamic_params(RuntimeParameters, pallet_parameters::Parameters::)] +pub mod dynamic_params { + use super::*; + + #[dynamic_pallet_params] + #[codec(index = 0)] + pub mod nis { + use super::*; + + #[codec(index = 0)] + pub static Target: Perquintill = Perquintill::zero(); + + #[codec(index = 1)] + pub static MinBid: Balance = 100 * UNITS; + } + + #[dynamic_pallet_params] + #[codec(index = 1)] + pub mod preimage { + use super::*; + + #[codec(index = 0)] + pub static BaseDeposit: Balance = deposit(2, 64); + + #[codec(index = 1)] + pub static ByteDeposit: Balance = deposit(0, 1); + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl Default for RuntimeParameters { + fn default() -> Self { + RuntimeParameters::Preimage(dynamic_params::preimage::Parameters::BaseDeposit( + dynamic_params::preimage::BaseDeposit, + Some(1u32.into()), + )) + } +} + +/// Defines what origin can modify which dynamic parameters. +pub struct DynamicParameterOrigin; +impl EnsureOriginWithArg for DynamicParameterOrigin { + type Success = (); + + fn try_origin( + origin: RuntimeOrigin, + key: &RuntimeParametersKey, + ) -> Result { + use crate::{dynamic_params::*, governance::*, RuntimeParametersKey::*}; + + match key { + Nis(nis::ParametersKey::MinBid(_)) => StakingAdmin::ensure_origin(origin.clone()), + Nis(nis::ParametersKey::Target(_)) => GeneralAdmin::ensure_origin(origin.clone()), + Preimage(_) => frame_system::ensure_root(origin.clone()), + } + .map_err(|_| origin) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(_key: &RuntimeParametersKey) -> Result { + // Provide the origin for the parameter returned by `Default`: + Ok(RuntimeOrigin::root()) + } +} + impl pallet_scheduler::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; @@ -250,8 +322,6 @@ impl pallet_scheduler::Config for Runtime { } parameter_types! 
{ - pub const PreimageBaseDeposit: Balance = deposit(2, 64); - pub const PreimageByteDeposit: Balance = deposit(0, 1); pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); } @@ -264,7 +334,11 @@ impl pallet_preimage::Config for Runtime { AccountId, Balances, PreimageHoldReason, - LinearStoragePrice, + LinearStoragePrice< + dynamic_params::preimage::BaseDeposit, + dynamic_params::preimage::ByteDeposit, + Balance, + >, >; } @@ -453,7 +527,15 @@ impl pallet_treasury::Config for Runtime { LocatableAssetConverter, VersionedLocationConverter, >; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; @@ -950,11 +1032,16 @@ impl pallet_message_queue::Config for Runtime { impl parachains_dmp::Config for Runtime {} +parameter_types! { + pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500); +} + impl parachains_hrmp::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type ChannelManager = EnsureRoot; type Currency = Balances; + type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem; type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo; } @@ -1123,12 +1210,10 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub const NisBasePeriod: BlockNumber = 30 * DAYS; - pub const MinBid: Balance = 100 * UNITS; pub MinReceipt: Perquintill = Perquintill::from_rational(1u64, 10_000_000u64); pub const IntakePeriod: BlockNumber = 5 * MINUTES; pub MaxIntakeWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 10; pub const ThawThrottle: (Perquintill, BlockNumber) = (Perquintill::from_percent(25), 5); - pub storage NisTarget: Perquintill = Perquintill::zero(); pub const NisPalletId: PalletId = PalletId(*b"py/nis "); } @@ -1142,13 +1227,13 @@ impl pallet_nis::Config for Runtime { type CounterpartAmount = WithMaximumOf>; type Deficit = (); // Mint type IgnoredIssuance = (); - type Target = NisTarget; + type Target = dynamic_params::nis::Target; type PalletId = NisPalletId; type QueueCount = ConstU32<300>; type MaxQueueLen = ConstU32<1000>; type FifoQueueLen = ConstU32<250>; type BasePeriod = NisBasePeriod; - type MinBid = MinBid; + type MinBid = dynamic_params::nis::MinBid; type MinReceipt = MinReceipt; type IntakePeriod = IntakePeriod; type MaxIntakeWeight = MaxIntakeWeight; @@ -1158,6 +1243,13 @@ impl pallet_nis::Config for Runtime { type BenchmarkSetup = (); } +impl pallet_parameters::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeParameters = RuntimeParameters; + type AdminOrigin = DynamicParameterOrigin; + type WeightInfo = weights::pallet_parameters::WeightInfo; +} + parameter_types! { pub BeefySetIdSessionEntries: u32 = BondingDuration::get() * SessionsPerEra::get(); } @@ -1190,6 +1282,7 @@ impl pallet_mmr::Config for Runtime { type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; type WeightInfo = (); type LeafData = pallet_beefy_mmr::Pallet; + type BlockHashProvider = pallet_mmr::DefaultBlockHashProvider; } parameter_types! { @@ -1285,6 +1378,7 @@ construct_runtime! 
{ Timestamp: pallet_timestamp = 2, Indices: pallet_indices = 3, Balances: pallet_balances = 4, + Parameters: pallet_parameters = 6, TransactionPayment: pallet_transaction_payment = 33, // Consensus support. @@ -1625,6 +1719,7 @@ mod benches { [pallet_indices, Indices] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] + [pallet_parameters, Parameters] [pallet_preimage, Preimage] [pallet_proxy, Proxy] [pallet_ranked_collective, FellowshipCollective] @@ -1790,6 +1885,7 @@ sp_api::impl_runtime_apis! { } fn candidate_pending_availability(para_id: ParaId) -> Option> { + #[allow(deprecated)] parachains_runtime_api_impl::candidate_pending_availability::(para_id) } @@ -1903,6 +1999,10 @@ sp_api::impl_runtime_apis! { fn claim_queue() -> BTreeMap> { vstaging_parachains_runtime_api_impl::claim_queue::() } + + fn candidates_pending_availability(para_id: ParaId) -> Vec> { + vstaging_parachains_runtime_api_impl::candidates_pending_availability::(para_id) + } } #[api_version(3)] diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 7328dca93936..3c6845dfb43e 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod pallet_indices; pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nis; +pub mod pallet_parameters; pub mod pallet_preimage; pub mod pallet_proxy; pub mod pallet_ranked_collective; diff --git a/polkadot/runtime/rococo/src/weights/pallet_parameters.rs b/polkadot/runtime/rococo/src/weights/pallet_parameters.rs new file mode 100644 index 000000000000..bd2bcf960e9b --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_parameters.rs @@ -0,0 +1,63 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_parameters` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_parameters +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_parameters`. 
+pub struct WeightInfo(PhantomData); +impl pallet_parameters::WeightInfo for WeightInfo { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + fn set_parameter() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3493` + // Minimum execution time: 6_937_000 picoseconds. + Weight::from_parts(7_242_000, 0) + .saturating_add(Weight::from_parts(0, 3493)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 417820e6627f..97b84155b36a 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -331,4 +331,14 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + fn establish_channel_with_system() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum execution time: 629_674_000 picoseconds. + Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index f01382509501..514643c0a201 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -22,15 +22,19 @@ use pallet_transaction_payment::FungibleAdapter; use parity_scale_codec::Encode; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::*, +}; use polkadot_runtime_parachains::{ assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, - disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, - inclusion as parachains_inclusion, initializer as parachains_initializer, - origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v10 as runtime_impl, + disputes::slashing as parachains_slashing, + dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, + initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, + runtime_api_impl::{v10 as runtime_impl, vstaging as vstaging_parachains_runtime_api_impl}, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -53,9 +57,9 @@ use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints; use primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - Hash as HashT, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash as HashT, Id as ParaId, 
InboundDownwardMessage, InboundHrmpMessage, + Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo as SessionInfoData, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; @@ -554,6 +558,7 @@ impl parachains_dmp::Config for Runtime {} parameter_types! { pub const FirstMessageFactorPercent: u64 = 100; + pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500); } impl parachains_hrmp::Config for Runtime { @@ -561,6 +566,7 @@ impl parachains_hrmp::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelManager = frame_system::EnsureRoot; type Currency = Balances; + type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem; type WeightInfo = parachains_hrmp::TestWeightInfo; } @@ -829,7 +835,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(10)] + #[api_version(11)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() @@ -877,6 +883,7 @@ sp_api::impl_runtime_apis! { } fn candidate_pending_availability(para_id: ParaId) -> Option> { + #[allow(deprecated)] runtime_impl::candidate_pending_availability::(para_id) } @@ -981,6 +988,14 @@ sp_api::impl_runtime_apis! { fn node_features() -> primitives::NodeFeatures { runtime_impl::node_features::() } + + fn claim_queue() -> BTreeMap> { + vstaging_parachains_runtime_api_impl::claim_queue::() + } + + fn candidates_pending_availability(para_id: ParaId) -> Vec> { + vstaging_parachains_runtime_api_impl::candidates_pending_availability::(para_id) + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 4930610c1d80..a06a1e1f7fc8 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -31,9 +31,9 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, - InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, - ProcessMessageError, WithdrawReasons, + fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf, + EitherOfDiverse, EverythingBut, FromContains, InstanceFilter, KeyOwnerProofSystem, + LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -57,7 +57,8 @@ use runtime_common::{ elections::OnChainAccuracy, identity_migrator, impl_runtime_weights, impls::{ - LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, traits::{Leaser, OnSwap}, @@ -80,7 +81,7 @@ use runtime_parachains::{ shared as parachains_shared, }; use scale_info::TypeInfo; -use sp_core::{OpaqueMetadata, RuntimeDebug, H256}; +use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ create_runtime_str, curve::PiecewiseLinear, @@ -333,6 +334,7 @@ impl pallet_mmr::Config for Runtime { type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; type WeightInfo = (); type LeafData = pallet_beefy_mmr::Pallet; + type BlockHashProvider = pallet_mmr::DefaultBlockHashProvider; } /// MMR helper types. 
@@ -711,7 +713,15 @@ impl pallet_treasury::Config for Runtime { LocatableAssetConverter, VersionedLocationConverter, >; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; @@ -1153,11 +1163,16 @@ impl pallet_message_queue::Config for Runtime { impl parachains_dmp::Config for Runtime {} +parameter_types! { + pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (4096, 4); +} + impl parachains_hrmp::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type ChannelManager = EnsureRoot; type Currency = Balances; + type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem; type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo; } @@ -1848,6 +1863,7 @@ sp_api::impl_runtime_apis! { } fn candidate_pending_availability(para_id: ParaId) -> Option> { + #[allow(deprecated)] parachains_runtime_api_impl::candidate_pending_availability::(para_id) } @@ -1961,6 +1977,10 @@ sp_api::impl_runtime_apis! { fn claim_queue() -> BTreeMap> { vstaging_parachains_runtime_api_impl::claim_queue::() } + + fn candidates_pending_availability(para_id: ParaId) -> Vec> { + vstaging_parachains_runtime_api_impl::candidates_pending_availability::(para_id) + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs index 9beb15303d87..3d2ab827b8fd 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs @@ -324,4 +324,14 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + fn establish_channel_with_system() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `6357` + // Minimum execution time: 629_674_000 picoseconds. 
+ Weight::from_parts(640_174_000, 0) + .saturating_add(Weight::from_parts(0, 6357)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(8)) + } } diff --git a/polkadot/tests/benchmark_block.rs b/polkadot/tests/benchmark_block.rs index 99f95ef611a4..bc2680259850 100644 --- a/polkadot/tests/benchmark_block.rs +++ b/polkadot/tests/benchmark_block.rs @@ -58,7 +58,13 @@ async fn build_chain(runtime: &str, base_path: &Path) { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(["--chain", runtime, "--force-authoring", "--alice"]) + .args([ + "--chain", + runtime, + "--force-authoring", + "--alice", + "--unsafe-force-node-key-generation", + ]) .arg("-d") .arg(base_path) .arg("--no-hardware-benchmarks") diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index e2903d592dc1..5d2e0f7b96f9 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -18,10 +18,7 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{ - traits::fungible::{Inspect, Mutate}, - weights::Weight, -}; +use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; @@ -90,11 +87,6 @@ pub trait Config: crate::Config { } benchmarks! { - where_clause { - where - T: pallet_balances::Config, - ::Balance: From + Into, - } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -129,11 +121,7 @@ benchmarks! { BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let transferred_amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: Assets = asset.into(); + let assets: Assets = asset.clone().into(); let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); @@ -150,13 +138,26 @@ benchmarks! { FeeReason::ChargeFees, ); - // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) - let balance = as Inspect<_>>::balance(&caller); - // Add transferred_amount to origin - as Mutate<_>>::mint_into(&caller, transferred_amount)?; - // verify initial balance - let balance = balance + transferred_amount; - assert_eq!( as Inspect<_>>::balance(&caller), balance); + match &asset.fun { + Fungible(amount) => { + // Add transferred_amount to origin + ::AssetTransactor::deposit_asset( + &Asset { fun: Fungible(*amount), id: asset.id }, + &origin_location, + None, + ).map_err(|error| { + log::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } + }; let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); @@ -164,21 +165,13 @@ benchmarks! 
{ AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); - } reserve_transfer_assets { let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let transferred_amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: Assets = asset.into(); + let assets: Assets = asset.clone().into(); let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); @@ -195,23 +188,50 @@ benchmarks! { FeeReason::ChargeFees, ); - // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) - let balance = as Inspect<_>>::balance(&caller); - // Add transferred_amount to origin - as Mutate<_>>::mint_into(&caller, transferred_amount)?; - // verify initial balance - let balance = balance + transferred_amount; - assert_eq!( as Inspect<_>>::balance(&caller), balance); + match &asset.fun { + Fungible(amount) => { + // Add transferred_amount to origin + ::AssetTransactor::deposit_asset( + &Asset { fun: Fungible(*amount), id: asset.id.clone() }, + &origin_location, + None, + ).map_err(|error| { + log::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } + }; let recipient = [0u8; 32]; - let versioned_dest: VersionedLocation = destination.into(); + let versioned_dest: VersionedLocation = destination.clone().into(); let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); + match &asset.fun { + Fungible(amount) => { + assert_ok!(::AssetTransactor::withdraw_asset( + &Asset { fun: Fungible(*amount), id: asset.id }, + &destination, + None, + )); + }, + NonFungible(instance) => { + assert_ok!(::AssetTransactor::withdraw_asset( + &asset, + &destination, + None, + )); + } + }; } transfer_assets { diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index ef255068734a..cf22b86cf82c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -1299,64 +1299,20 @@ pub mod pallet { ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); - let mut assets = assets.into_inner(); + let assets = assets.into_inner(); let fee_asset_item = fee_asset_item as usize; - let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); // Find transfer types for fee and non-fee 
assets. let (fees_transfer_type, assets_transfer_type) = Self::find_fee_and_assets_transfer_types(&assets, fee_asset_item, &dest)?; - // local and remote XCM programs to potentially handle fees separately - let fees = if fees_transfer_type == assets_transfer_type { - // no need for custom fees instructions, fees are batched with assets - FeesHandling::Batched { fees } - } else { - // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered - // by branch above). The reason for this is that we'd need to send XCMs to separate - // chains with no guarantee of delivery order on final destination; therefore we - // cannot guarantee to have fees in place on final destination chain to pay for - // assets transfer. - ensure!( - !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), - Error::::InvalidAssetUnsupportedReserve - ); - let weight_limit = weight_limit.clone(); - // remove `fees` from `assets` and build separate fees transfer instructions to be - // added to assets transfers XCM programs - let fees = assets.remove(fee_asset_item); - let (local_xcm, remote_xcm) = match fees_transfer_type { - TransferType::LocalReserve => Self::local_reserve_fees_instructions( - origin.clone(), - dest.clone(), - fees, - weight_limit, - )?, - TransferType::DestinationReserve => - Self::destination_reserve_fees_instructions( - origin.clone(), - dest.clone(), - fees, - weight_limit, - )?, - TransferType::Teleport => Self::teleport_fees_instructions( - origin.clone(), - dest.clone(), - fees, - weight_limit, - )?, - TransferType::RemoteReserve(_) => - return Err(Error::::InvalidAssetUnsupportedReserve.into()), - }; - FeesHandling::Separate { local_xcm, remote_xcm } - }; - - Self::build_and_execute_xcm_transfer_type( + Self::do_transfer_assets( origin, dest, beneficiary, assets, assets_transfer_type, - fees, + fee_asset_item, + fees_transfer_type, weight_limit, ) } @@ -1443,6 +1399,87 @@ pub mod pallet { >::send_blob(origin, dest, encoded_message)?; Ok(()) } + + /// Transfer assets from the local chain to the destination chain using explicit transfer + /// types for assets and fees. + /// + /// `assets` must have same reserve location or may be teleportable to `dest`. Caller must + /// provide the `assets_transfer_type` to be used for `assets`: + /// - `TransferType::LocalReserve`: transfer assets to sovereign account of destination + /// chain and forward a notification XCM to `dest` to mint and deposit reserve-based + /// assets to `beneficiary`. + /// - `TransferType::DestinationReserve`: burn local assets and forward a notification to + /// `dest` chain to withdraw the reserve assets from this chain's sovereign account and + /// deposit them to `beneficiary`. + /// - `TransferType::RemoteReserve(reserve)`: burn local assets, forward XCM to `reserve` + /// chain to move reserves from this chain's SA to `dest` chain's SA, and forward another + /// XCM to `dest` to mint and deposit reserve-based assets to `beneficiary`. Typically + /// the remote `reserve` is Asset Hub. + /// - `TransferType::Teleport`: burn local assets and forward XCM to `dest` chain to + /// mint/teleport assets and deposit them to `beneficiary`. + /// + /// Fee payment on the source, destination and all intermediary hops, is specified through + /// `fees_id`, but make sure enough of the specified `fees_id` asset is included in the + /// given list of `assets`. `fees_id` should be enough to pay for `weight_limit`. 
If more + /// weight is needed than `weight_limit`, then the operation will fail and the sent assets + /// may be at risk. + /// + /// `fees_id` may use different transfer type than rest of `assets` and can be specified + /// through `fees_transfer_type`. + /// + /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. + /// - `dest`: Destination context for the assets. Will typically be `[Parent, + /// Parachain(..)]` to send from parachain to parachain, or `[Parachain(..)]` to send from + /// relay to parachain, or `(parents: 2, (GlobalConsensus(..), ..))` to send from + /// parachain across a bridge to another ecosystem destination. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` (and possibly reserve) chains. + /// - `assets_transfer_type`: The XCM `TransferType` used to transfer the `assets`. + /// - `fees_id`: One of the included `assets` to be be used to pay fees. + /// - `fees_transfer_type`: The XCM `TransferType` used to transfer the `fees` assets. + /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. + #[pallet::call_index(15)] + #[pallet::weight(T::WeightInfo::transfer_assets())] + pub fn transfer_assets_using_type( + origin: OriginFor, + dest: Box, + beneficiary: Box, + assets: Box, + assets_transfer_type: Box, + fees_id: Box, + fees_transfer_type: Box, + weight_limit: WeightLimit, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let dest: Location = (*dest).try_into().map_err(|()| Error::::BadVersion)?; + let beneficiary: Location = + (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; + let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + let fees_id: AssetId = (*fees_id).try_into().map_err(|()| Error::::BadVersion)?; + log::debug!( + target: "xcm::pallet_xcm::transfer_assets_using_type", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?} through {:?}, fees-id {:?} through {:?}", + origin_location, dest, beneficiary, assets, assets_transfer_type, fees_id, fees_transfer_type, + ); + + let assets = assets.into_inner(); + ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); + + let fee_asset_index = + assets.iter().position(|a| a.id == fees_id).ok_or(Error::::FeesNotMet)?; + Self::do_transfer_assets( + origin_location, + dest, + beneficiary, + assets, + *assets_transfer_type, + fee_asset_index, + *fees_transfer_type, + weight_limit, + ) + } } } @@ -1607,15 +1644,16 @@ impl Pallet { // Ensure all assets (including fees) have same reserve location. 
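From a caller's perspective, the new extrinsic mirrors `transfer_assets` but takes the transfer types and the fee asset id explicitly. A hedged usage sketch follows: the field names and boxed `Versioned*` arguments follow the signature above, while the pallet instance name `PolkadotXcm`, the para ids and the amount are illustrative assumptions, not values from this diff.

```rust
use xcm::prelude::*;
use xcm_executor::traits::TransferType;

// Assumed context: a parachain runtime exposing pallet-xcm as `PolkadotXcm`.
fn example_transfer_call() -> RuntimeCall {
    let dest: Location = (Parent, Parachain(2001)).into();
    let beneficiary: Location = AccountId32 { network: None, id: [0u8; 32] }.into();
    let assets: Assets = (Parent, 10_000_000_000u128).into();
    let fees_id: AssetId = AssetId(Location::parent());
    // Route both the assets and the fees through the same (hypothetical) remote reserve.
    let remote_reserve: VersionedLocation = Location::new(1, [Parachain(1000)]).into();

    RuntimeCall::PolkadotXcm(pallet_xcm::Call::transfer_assets_using_type {
        dest: Box::new(dest.into()),
        beneficiary: Box::new(beneficiary.into()),
        assets: Box::new(assets.into()),
        assets_transfer_type: Box::new(TransferType::RemoteReserve(remote_reserve.clone())),
        fees_id: Box::new(fees_id.into()),
        fees_transfer_type: Box::new(TransferType::RemoteReserve(remote_reserve)),
        weight_limit: WeightLimit::Unlimited,
    })
}
```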
ensure!(assets_transfer_type == fees_transfer_type, Error::::TooManyReserves); - Self::build_and_execute_xcm_transfer_type( - origin, - dest, + let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( + origin.clone(), + dest.clone(), beneficiary, assets, assets_transfer_type, FeesHandling::Batched { fees }, weight_limit, - ) + )?; + Self::execute_xcm_transfer(origin, dest, local_xcm, remote_xcm) } fn do_teleport_assets( @@ -1648,18 +1686,85 @@ impl Pallet { } let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); - Self::build_and_execute_xcm_transfer_type( - origin_location, - dest, + let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( + origin_location.clone(), + dest.clone(), beneficiary, assets, TransferType::Teleport, FeesHandling::Batched { fees }, weight_limit, - ) + )?; + Self::execute_xcm_transfer(origin_location, dest, local_xcm, remote_xcm) + } + + fn do_transfer_assets( + origin: Location, + dest: Location, + beneficiary: Location, + mut assets: Vec, + assets_transfer_type: TransferType, + fee_asset_index: usize, + fees_transfer_type: TransferType, + weight_limit: WeightLimit, + ) -> DispatchResult { + // local and remote XCM programs to potentially handle fees separately + let fees = if fees_transfer_type == assets_transfer_type { + let fees = assets.get(fee_asset_index).ok_or(Error::::Empty)?.clone(); + // no need for custom fees instructions, fees are batched with assets + FeesHandling::Batched { fees } + } else { + // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered + // by branch above). The reason for this is that we'd need to send XCMs to separate + // chains with no guarantee of delivery order on final destination; therefore we + // cannot guarantee to have fees in place on final destination chain to pay for + // assets transfer. 
+ ensure!( + !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), + Error::::InvalidAssetUnsupportedReserve + ); + let weight_limit = weight_limit.clone(); + // remove `fees` from `assets` and build separate fees transfer instructions to be + // added to assets transfers XCM programs + let fees = assets.remove(fee_asset_index); + let (local_xcm, remote_xcm) = match fees_transfer_type { + TransferType::LocalReserve => Self::local_reserve_fees_instructions( + origin.clone(), + dest.clone(), + fees, + weight_limit, + )?, + TransferType::DestinationReserve => Self::destination_reserve_fees_instructions( + origin.clone(), + dest.clone(), + fees, + weight_limit, + )?, + TransferType::Teleport => Self::teleport_fees_instructions( + origin.clone(), + dest.clone(), + fees, + weight_limit, + )?, + TransferType::RemoteReserve(_) => + return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }; + FeesHandling::Separate { local_xcm, remote_xcm } + }; + + let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( + origin.clone(), + dest.clone(), + beneficiary, + assets, + assets_transfer_type, + fees, + weight_limit, + )?; + Self::execute_xcm_transfer(origin, dest, local_xcm, remote_xcm) } - fn build_and_execute_xcm_transfer_type( + fn build_xcm_transfer_type( origin: Location, dest: Location, beneficiary: Location, @@ -1667,14 +1772,14 @@ impl Pallet { transfer_type: TransferType, fees: FeesHandling, weight_limit: WeightLimit, - ) -> DispatchResult { + ) -> Result<(Xcm<::RuntimeCall>, Option>), Error> { log::debug!( - target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + target: "xcm::pallet_xcm::build_xcm_transfer_type", "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ fees_handling {:?}, weight_limit: {:?}", origin, dest, beneficiary, assets, transfer_type, fees, weight_limit, ); - let (mut local_xcm, remote_xcm) = match transfer_type { + Ok(match transfer_type { TransferType::LocalReserve => { let (local, remote) = Self::local_reserve_transfer_programs( origin.clone(), @@ -1704,7 +1809,7 @@ impl Pallet { }; let local = Self::remote_reserve_transfer_program( origin.clone(), - reserve, + reserve.try_into().map_err(|()| Error::::BadVersion)?, dest.clone(), beneficiary, assets, @@ -1724,7 +1829,21 @@ impl Pallet { )?; (local, Some(remote)) }, - }; + }) + } + + fn execute_xcm_transfer( + origin: Location, + dest: Location, + mut local_xcm: Xcm<::RuntimeCall>, + remote_xcm: Option>, + ) -> DispatchResult { + log::debug!( + target: "xcm::pallet_xcm::execute_xcm_transfer", + "origin {:?}, dest {:?}, local_xcm {:?}, remote_xcm {:?}", + origin, dest, local_xcm, remote_xcm, + ); + let weight = T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; let mut hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); @@ -1738,7 +1857,7 @@ impl Pallet { Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); outcome.ensure_complete().map_err(|error| { log::error!( - target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + target: "xcm::pallet_xcm::execute_xcm_transfer", "XCM execution failed with error {:?}", error ); Error::::LocalExecutionIncomplete @@ -1750,7 +1869,7 @@ impl Pallet { if origin != Here.into_location() { Self::charge_fees(origin.clone(), price).map_err(|error| { log::error!( - target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + target: "xcm::pallet_xcm::execute_xcm_transfer", "Unable to charge fee with error {:?}", error ); Error::::FeesNotMet @@ -1935,6 +2054,7 @@ impl Pallet { 
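In short, `do_transfer_assets` batches the fees with the assets whenever both use the same transfer type, and otherwise builds dedicated fee instructions, rejecting any combination that would route through a remote reserve. The rule restated as a standalone predicate, for illustration only (not code from this diff):

```rust
use xcm_executor::traits::TransferType;

/// May `fees` be handled with the given `assets` transfer type combination?
fn fee_handling_allowed(assets: &TransferType, fees: &TransferType) -> bool {
    if assets == fees {
        // Same transfer type: fees are simply batched with the assets.
        return true;
    }
    // Differing types are supported except through a remote reserve on either
    // side: two XCMs sent to different chains have no delivery-order guarantee,
    // so fees might not be on the destination in time to pay for the deposit.
    !matches!(assets, TransferType::RemoteReserve(_))
        && !matches!(fees, TransferType::RemoteReserve(_))
}
```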
]); // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; + // deposit all remaining assets in holding to `beneficiary` location xcm_on_dest .inner_mut() diff --git a/polkadot/xcm/xcm-builder/src/asset_conversion.rs b/polkadot/xcm/xcm-builder/src/asset_conversion.rs index e38af149be54..520ce87448ea 100644 --- a/polkadot/xcm/xcm-builder/src/asset_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/asset_conversion.rs @@ -107,17 +107,6 @@ impl< #[deprecated = "Use `ConvertedConcreteId` instead"] pub type ConvertedConcreteAssetId = ConvertedConcreteId; -pub struct V4V3LocationConverter; -impl MaybeEquivalence for V4V3LocationConverter { - fn convert(old: &xcm::v4::Location) -> Option { - (*old).clone().try_into().ok() - } - - fn convert_back(new: &xcm::v3::Location) -> Option { - (*new).try_into().ok() - } -} - pub struct MatchedConvertedConcreteId( PhantomData<(AssetId, Balance, MatchAssetId, ConvertAssetId, ConvertOther)>, ); diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index b8923a4d5c6e..c0b328f38e96 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -322,6 +322,29 @@ impl> Contains for IsChildSystemParachain } } +/// Matches if the given location is a system-level sibling parachain. +pub struct IsSiblingSystemParachain(PhantomData<(ParaId, SelfParaId)>); +impl + Eq, SelfParaId: Get> Contains + for IsSiblingSystemParachain +{ + fn contains(l: &Location) -> bool { + matches!( + l.unpack(), + (1, [Junction::Parachain(id)]) + if SelfParaId::get() != ParaId::from(*id) && ParaId::from(*id).is_system(), + ) + } +} + +/// Matches if the given location contains only the specified amount of parents and no interior +/// junctions. +pub struct IsParentsOnly(PhantomData); +impl> Contains for IsParentsOnly { + fn contains(t: &Location) -> bool { + t.contains_parents_only(Count::get()) + } +} + /// Allows only messages if the generic `ResponseHandler` expects them via `expecting_response`. 
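Both new matchers above are plain `Contains<Location>` filters. A quick illustration of what they accept, assuming use inside a parachain runtime whose own id is 1000; the `SelfParaId` getter and the para ids are illustrative:

```rust
use frame_support::{parameter_types, traits::{ConstU8, Contains}};
use polkadot_parachain_primitives::primitives::Id as ParaId;
use xcm::latest::prelude::*;
use xcm_builder::{IsParentsOnly, IsSiblingSystemParachain};

parameter_types! {
    // Hypothetical: the id of the parachain this configuration lives on.
    pub SelfParaId: ParaId = ParaId::from(1000);
}

type SiblingSystem = IsSiblingSystemParachain<ParaId, SelfParaId>;
type ParentOnly = IsParentsOnly<ConstU8<1>>;

fn examples() {
    // A *sibling* system parachain matches...
    assert!(SiblingSystem::contains(&Location::new(1, [Parachain(1001)])));
    // ...but the chain itself and non-system siblings do not.
    assert!(!SiblingSystem::contains(&Location::new(1, [Parachain(1000)])));
    assert!(!SiblingSystem::contains(&Location::new(1, [Parachain(2000)])));

    // `IsParentsOnly<ConstU8<1>>` matches exactly the parent (relay) location.
    assert!(ParentOnly::contains(&Location::parent()));
    assert!(!ParentOnly::contains(&Location::new(1, [Parachain(1001)])));
}
```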
pub struct AllowKnownQueryResponses(PhantomData); impl ShouldExecute for AllowKnownQueryResponses { diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 46d0ad227bfd..bd4a4c941c91 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -30,15 +30,15 @@ mod asset_conversion; #[allow(deprecated)] pub use asset_conversion::ConvertedConcreteAssetId; pub use asset_conversion::{ - AsPrefixedGeneralIndex, ConvertedConcreteId, MatchedConvertedConcreteId, V4V3LocationConverter, + AsPrefixedGeneralIndex, ConvertedConcreteId, MatchedConvertedConcreteId, }; mod barriers; pub use barriers::{ AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, IsChildSystemParachain, RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, - WithComputedOrigin, + DenyThenTry, IsChildSystemParachain, IsParentsOnly, IsSiblingSystemParachain, + RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin, }; mod controller; @@ -81,7 +81,9 @@ pub use location_conversion::{ }; mod matches_location; -pub use matches_location::{StartsWith, StartsWithExplicitGlobalConsensus}; +pub use matches_location::{ + StartsWith, StartsWithExplicitGlobalConsensus, WithLatestLocationConverter, +}; mod matches_token; pub use matches_token::IsConcrete; diff --git a/polkadot/xcm/xcm-builder/src/matches_location.rs b/polkadot/xcm/xcm-builder/src/matches_location.rs index 1664c2477290..b6c2807e6b29 100644 --- a/polkadot/xcm/xcm-builder/src/matches_location.rs +++ b/polkadot/xcm/xcm-builder/src/matches_location.rs @@ -18,6 +18,8 @@ //! `InteriorLocation` types. use frame_support::traits::{Contains, Get}; +use sp_runtime::traits::MaybeEquivalence; +use sp_std::marker::PhantomData; use xcm::latest::{InteriorLocation, Location, NetworkId}; /// An implementation of `Contains` that checks for `Location` or @@ -51,3 +53,18 @@ impl> Contains for StartsWithExplicitGlobalC matches!(location.global_consensus(), Ok(requested_network) if requested_network.eq(&T::get())) } } + +/// An adapter implementation of `MaybeEquivalence` which can convert between the latest `Location` +/// and other versions that implement `TryInto` and `TryFrom`. 
+pub struct WithLatestLocationConverter(PhantomData); +impl + TryFrom + Clone> MaybeEquivalence + for WithLatestLocationConverter +{ + fn convert(old: &Location) -> Option { + (*old).clone().try_into().ok() + } + + fn convert_back(new: &Target) -> Option { + new.clone().try_into().ok() + } +} diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 81b81fe6a171..e673a46c4ac6 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -347,6 +347,9 @@ impl XcmExecutor { msg: Xcm<()>, reason: FeeReason, ) -> Result { + log::trace!( + target: "xcm::send", "Sending msg: {msg:?}, to destination: {dest:?}, (reason: {reason:?})" + ); let (ticket, fee) = validate_send::(dest, msg)?; self.take_fee(fee, reason)?; Config::XcmSender::deliver(ticket).map_err(Into::into) diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index 5da3d1da37c8..6d72eaf680fd 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -30,7 +30,7 @@ pub enum Error { } /// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo)] pub enum TransferType { /// should teleport `asset` to `dest` Teleport, @@ -39,7 +39,7 @@ pub enum TransferType { /// should reserve-transfer `asset` to `dest`, using `dest` as reserve DestinationReserve, /// should reserve-transfer `asset` to `dest`, using remote chain `Location` as reserve - RemoteReserve(Location), + RemoteReserve(VersionedLocation), } /// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` @@ -77,7 +77,7 @@ pub trait XcmAssetTransfers { Ok(TransferType::LocalReserve) } else if Self::IsReserve::contains(asset, &asset_location) { // remote location that is recognized as reserve location for asset - Ok(TransferType::RemoteReserve(asset_location)) + Ok(TransferType::RemoteReserve(asset_location.into())) } else { // remote location that is not configured either as teleporter or reserve => cannot // determine asset reserve diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml index db508e14dbaa..83f5434edddb 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml @@ -20,8 +20,8 @@ chain = "rococo-local" default_command = "polkadot" [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } + limits = { memory = "4G", cpu = "3" } + requests = { memory = "4G", cpu = "3" } [[relaychain.node_groups]] name = "elastic-validator" @@ -32,11 +32,20 @@ default_command = "polkadot" [[parachains]] id = {{id}} addToGenesis = true + [parachains.default_resources] + limits = { memory = "4G", cpu = "3" } + requests = { memory = "4G", cpu = "3" } [parachains.collator] name = "some-parachain" image = "{{COL_IMAGE}}" command = "adder-collator" args = ["-lparachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug"] + {% endfor %} +# This represents the layout of the adder collator block header. 
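As pr_4037 further below notes, `WithLatestLocationConverter` is the drop-in replacement for the removed `V4V3LocationConverter`: it round-trips between the latest `Location` and any target version implementing the usual `TryFrom`/`TryInto`. A small usage sketch, with `xcm::v3::Location` as the target version:

```rust
use sp_runtime::traits::MaybeEquivalence;
use xcm::latest::prelude::*;
use xcm_builder::WithLatestLocationConverter;

fn example() {
    let latest = Location::new(1, [Parachain(1000)]);

    // Downgrade the latest `Location` to its `xcm::v3` representation...
    let v3: Option<xcm::v3::Location> =
        WithLatestLocationConverter::<xcm::v3::Location>::convert(&latest);

    // ...and convert back; both directions return `None` when a lossless
    // conversion is not possible.
    let roundtrip: Option<Location> =
        WithLatestLocationConverter::<xcm::v3::Location>::convert_back(&v3.unwrap());

    assert_eq!(roundtrip, Some(latest));
}
```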
+[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl index b9c002457549..d624cbaf9df6 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl @@ -18,11 +18,11 @@ elastic-validator-0: js-script ./assign-core.js with "2000,1" return is 0 within elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds # Non elastic parachain should progress normally -some-parachain-1: count of log lines containing "Parachain velocity: 1" is at least 9 within 20 seconds +some-parachain-1: count of log lines containing "Parachain velocity: 1" is at least 5 within 20 seconds # Sanity -some-parachain-1: count of log lines containing "Parachain velocity: 2" is 0 within 20 seconds +some-parachain-1: count of log lines containing "Parachain velocity: 2" is 0 -# Parachain should progress 3 blocks per relay chain block ideally, however this measurement does -# `ceil()` on the actual velocity to account for CI overload. -some-parachain: count of log lines containing "Parachain velocity: 3" is at least 9 within 20 seconds +# Parachain should progress 3 blocks per relay chain block ideally, however CI might not be +# the most performant environment so we'd just use a lower bound of 2 blocks per RCB +elastic-validator-0: parachain 2000 block height is at least 20 within 200 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js index 2e5f9d8cfa58..add63b6d3085 100644 --- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js +++ b/polkadot/zombienet_tests/elastic_scaling/assign-core.js @@ -1,6 +1,6 @@ async function run(nodeName, networkInfo, args) { - const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); + const wsUri = networkInfo.nodesByName[nodeName].wsUri; + const api = await zombie.connect(wsUri); let para = Number(args[0]); let core = Number(args[1]); @@ -33,8 +33,6 @@ async function run(nodeName, networkInfo, args) { }); }); - - return 0; } diff --git a/prdoc/pr_3250.prdoc b/prdoc/pr_3250.prdoc new file mode 100644 index 000000000000..77ea725073e6 --- /dev/null +++ b/prdoc/pr_3250.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Asset Conversion: Pool Account ID derivation with additional Pallet ID seed" + +doc: + - audience: Runtime Dev + description: | + Introduce PalletId as an additional seed parameter for pool's account id derivation. + The PR also introduces the `pallet_asset_conversion_ops` pallet with a call to migrate + a pool to the new account. Additionally `fungibles::roles::ResetTeam` and + `fungible::lifetime::Refund` traits, to facilitate the migration functionality. 
+ +crates: + - name: pallet-asset-conversion + bump: minor diff --git a/prdoc/pr_3455.prdoc b/prdoc/pr_3455.prdoc new file mode 100644 index 000000000000..c16ac2244862 --- /dev/null +++ b/prdoc/pr_3455.prdoc @@ -0,0 +1,28 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Region reserve transfers fix + +doc: + - audience: Runtime User + description: | + This PR introduces changes enabling the transfer of coretime regions via XCM. + There are two primary issues that are resolved in this PR: + 1. The mint and burn functions were not implemented for coretime regions. These operations + are essential for moving assets to and from the XCM holding register. + 2. The transfer of non-fungible assets through XCM was previously disallowed. This was due + to incorrectly benchmarking non-fungible asset transfers via XCM, which led to assigning + it a weight of Weight::Max, effectively preventing its execution. + +migrations: + db: [] + runtime: + - reference: pallet-broker + description: | + The region owner is optional. + +crates: + - name: pallet-broker + - name: pallet-xcm + - name: coretime-rococo-runtime + - name: coretime-westend-runtime diff --git a/prdoc/pr_3630.prdoc b/prdoc/pr_3630.prdoc new file mode 100644 index 000000000000..67122761120d --- /dev/null +++ b/prdoc/pr_3630.prdoc @@ -0,0 +1,12 @@ +title: "Use async backing enabled collator for all the parachains" + +doc: + - audience: Node Operator + description: | + Promotes all the parachains supported by `polkadot-parachain`, including all the systems + parachains, to use the async backing enabled lookahead Aura collator instead of the generic + one. + +crates: + - name: polkadot-parachain-bin + bump: major diff --git a/prdoc/pr_3659.prdoc b/prdoc/pr_3659.prdoc new file mode 100644 index 000000000000..393844d822d8 --- /dev/null +++ b/prdoc/pr_3659.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Unity Balance Conversion for Different IDs of Native Asset + +doc: + - audience: Runtime Dev + description: | + Introduce types to define 1:1 balance conversion for different relative asset ids/locations + of native asset for `ConversionToAssetBalance` trait bounds. + +crates: [ ] \ No newline at end of file diff --git a/prdoc/pr_3695.prdoc b/prdoc/pr_3695.prdoc new file mode 100644 index 000000000000..2c2c2b2e6917 --- /dev/null +++ b/prdoc/pr_3695.prdoc @@ -0,0 +1,38 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-xcm: add new extrinsic for asset transfers using explicit reserve" + +doc: + - audience: Runtime User + description: | + pallet-xcm has a new extrinsic `transfer_assets_using_type` for transferring + assets from local chain to destination chain using an explicit XCM transfer + types for transferring the assets and the fees: + - `TransferType::LocalReserve`: transfer assets to sovereign account of destination + chain and forward a notification XCM to `dest` to mint and deposit reserve-based + assets to `beneficiary`. + - `TransferType::DestinationReserve`: burn local assets and forward a notification to + `dest` chain to withdraw the reserve assets from this chain's sovereign account and + deposit them to `beneficiary`. 
+ - `TransferType::RemoteReserve(reserve)`: burn local assets, forward XCM to `reserve` + chain to move reserves from this chain's SA to `dest` chain's SA, and forward another + XCM to `dest` to mint and deposit reserve-based assets to `beneficiary`. Typically + the remote `reserve` is Asset Hub. + - `TransferType::Teleport`: burn local assets and forward XCM to `dest` chain to + mint/teleport assets and deposit them to `beneficiary`. + By default, an asset's reserve is its origin chain. But sometimes we may want to + explicitly use another chain as reserve (as long as allowed by runtime IsReserve + filter). + This is very helpful for transferring assets with multiple configured reserves + (such as Asset Hub ForeignAssets), when the transfer strictly depends on the used + reserve location. + + E.g. For transferring a bridged Foreign Assets between local parachains, Asset Hub + or the parachain that bridged the asset over must be used as the reserve location. + Same when transferring bridged assets back across the bridge, the local bridging + parachain must be used as the explicit reserve location. + +crates: +- name: pallet-xcm + bump: minor diff --git a/prdoc/pr_3721.prdoc b/prdoc/pr_3721.prdoc new file mode 100644 index 000000000000..be36103c4742 --- /dev/null +++ b/prdoc/pr_3721.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: New call `hrmp.establish_channel_with_system` to allow parachains to establish a channel with a system parachain + +doc: + - audience: Runtime Dev + description: | + This PR adds a new call `hrmp.establish_channel_with_system` that allows a parachain origin to open a bidirectional channel with a system parachain. + +crates: +- name: polkadot-runtime-parachains + bump: minor diff --git a/prdoc/pr_3789.prdoc b/prdoc/pr_3789.prdoc new file mode 100644 index 000000000000..802334423b16 --- /dev/null +++ b/prdoc/pr_3789.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-contracts] Benchmarks improvements" + +doc: + - audience: Runtime Dev + description: Reuse wasmi module when validating the wasm code. +crates: +- name: pallet-contracts + bump: patch + diff --git a/prdoc/pr_3852.prdoc b/prdoc/pr_3852.prdoc new file mode 100644 index 000000000000..f13e1766d518 --- /dev/null +++ b/prdoc/pr_3852.prdoc @@ -0,0 +1,25 @@ +title: (Breaking change)Enforce network key presence on authorities. + +doc: + - audience: Node Operator + description: | + (Breaking change) For all authority nodes, the node binary now enforces the presence + of a network key, instead of auto-generating when it is absent. + + Before this change, all node binaries were auto-generating the node key when it was not present, + that is dangerous because other nodes in the network expects a stable identity for authorities. + + To prevent accidental generation of node key, we removed this behaviour and node binary will now throw + an error if the network key is not present and operators will receive instructions to either persist + their network key or explicitly generate a new one with the `polkadot key generate-node-key`. + + To prevent this error on restart/upgrades node operators need to make sure their network key are always + persisted, if nodes already correctly persist all directories in `--base-path` then no action is needed. 
+ +crates: + - name: sc-cli + bump: major + - name: polkadot + bump: major + - name: subkey + bump: minor \ No newline at end of file diff --git a/prdoc/pr_3915.prdoc b/prdoc/pr_3915.prdoc new file mode 100644 index 000000000000..a236288c99fb --- /dev/null +++ b/prdoc/pr_3915.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-contracts] Weights update" + +doc: + - audience: Runtime Dev + description: | + Update Host functions benchmarks, instead of benchmarking the whole call extrinsic, this PR solely benchmark the execution of the Host function. + Previously, some benchmarks would overestimate the weight as both the parsing and execution of the contract were included in the benchmark. + +crates: +- name: pallet-contracts + bump: patch diff --git a/prdoc/pr_4006.prdoc b/prdoc/pr_4006.prdoc new file mode 100644 index 000000000000..e6c339c406ac --- /dev/null +++ b/prdoc/pr_4006.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Deploy pallet-parameters to rococo and fix dynamic_params name expand" + +doc: + - audience: Runtime Dev + description: | + Fix the expanded names of `dynamic_params` to not remove suffix "s". + + Also deploy the parameters pallet to the rococo-runtime. + +crates: + - name: frame-support-procedural + bump: major + - name: rococo-runtime + bump: major + - name: pallet-parameters + bump: patch diff --git a/prdoc/pr_4017.prdoc b/prdoc/pr_4017.prdoc new file mode 100644 index 000000000000..9e83d5d1642a --- /dev/null +++ b/prdoc/pr_4017.prdoc @@ -0,0 +1,15 @@ +title: Complete removal of `TryRuntime` subcommand + +doc: + - audience: Node Operator + description: | + The deprecated `try-runtime` subcommand and `try-runtime-cli` crate has been completely removed. + If you are not already, you should be using the standalone CLI at https://github.com/paritytech/try-runtime-cli. + +crates: + - name: polkadot-parachain-bin + bump: patch + - name: polkadot-cli + bump: patch + - name: staging-node-cli + bump: patch diff --git a/prdoc/pr_4027.prdoc b/prdoc/pr_4027.prdoc new file mode 100644 index 000000000000..c85fd196a6c4 --- /dev/null +++ b/prdoc/pr_4027.prdoc @@ -0,0 +1,25 @@ +title: Add `candidates_pending_availability` Runtime API + +doc: + - audience: "Node Dev" + description: | + This new API retrieves all `CommittedCandidateReceipts` of all candidates pending availability + for a parachain at a given relay chain block number. It is required by collators that make use + of elastic scaling capability in the context of PoV recovery and block import. The old API + `candidate_pending_availability` is now deprectated and will be removed in the future. 
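For node-side consumers the difference is simply "all pending candidates for the para" instead of "at most one". A hedged sketch of calling the new `ParachainHost` method through `sp_api`; the client plumbing shown is assumed, only the method names and return types come from this diff:

```rust
use polkadot_primitives::{
    runtime_api::ParachainHost, CommittedCandidateReceipt, Hash, Id as ParaId,
};
use sp_api::ProvideRuntimeApi;

/// Fetch every candidate pending availability for `para_id` at block `at`,
/// assuming the runtime exposes `ParachainHost` API version 11.
fn pending_candidates<Client>(
    client: &Client,
    at: Hash,
    para_id: ParaId,
) -> Result<Vec<CommittedCandidateReceipt<Hash>>, sp_api::ApiError>
where
    Client: ProvideRuntimeApi<polkadot_primitives::Block>,
    Client::Api: ParachainHost<polkadot_primitives::Block>,
{
    // The deprecated `candidate_pending_availability` would return at most one
    // receipt here, even when the para occupies several cores.
    client.runtime_api().candidates_pending_availability(at, para_id)
}
```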
+ +crates: + - name: polkadot-node-core-runtime-api + bump: minor + - name: polkadot-node-subsystem-types + bump: minor + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-primitives + bump: minor + - name: polkadot-runtime-parachains + bump: minor + - name: cumulus-relay-chain-rpc-interface + bump: minor + - name: cumulus-relay-chain-minimal-node + bump: minor diff --git a/prdoc/pr_4037.prdoc b/prdoc/pr_4037.prdoc new file mode 100644 index 000000000000..7071875a7e37 --- /dev/null +++ b/prdoc/pr_4037.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove `xcm::v3` from `assets-common` nits" + +doc: + - audience: Runtime Dev + description: | + Remove `xcm::v3` imports from `assets-common` to make it more generic and facilitate the transition to newer XCM versions. + The implementations `AssetIdForTrustBackedAssetsConvert`, `ForeignAssetsConvertedConcreteId`, or `TrustBackedAssetsAsLocation` + used hard-coded `xcm::v3::Location`, which has been changed to use `xcm::latest::Location` by default. + Alternatively, the providing runtime can configure them according to its needs, such as with a lower XCM version. + + Example: + ```patch + - AssetIdForTrustBackedAssetsConvert, + + AssetIdForTrustBackedAssetsConvert, + ``` + + Another change is that the removed `xcm_builder::V4V3LocationConverter` can be replaced with `WithLatestLocationConverter`. + +crates: +- name: assets-common + bump: patch +- name: staging-xcm-builder + bump: patch diff --git a/prdoc/pr_4059.prdoc b/prdoc/pr_4059.prdoc new file mode 100644 index 000000000000..92753328a433 --- /dev/null +++ b/prdoc/pr_4059.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove redundent logging code + +doc: + - audience: Node Dev + description: | + Simplified logging code, now does slightly less work while logging. + +crates: +- name: sc-tracing + bump: minor diff --git a/prdoc/pr_4070.prdoc b/prdoc/pr_4070.prdoc new file mode 100644 index 000000000000..a8f4f0f35053 --- /dev/null +++ b/prdoc/pr_4070.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Avoid using `para_backing_state` if runtime doesn't support async backing + +doc: + - audience: Node Operator + description: | + Fixes https://github.com/paritytech/polkadot-sdk/issues/4067 which prevents collators to + upgrade to latest release (v1.10.0) + +crates: [ ] diff --git a/prdoc/pr_4072.prdoc b/prdoc/pr_4072.prdoc new file mode 100644 index 000000000000..7195b99c9d5a --- /dev/null +++ b/prdoc/pr_4072.prdoc @@ -0,0 +1,10 @@ +title: "Amend chainspecs for people-westend and add IBP bootnodes" + +doc: + - audience: Node Operator + description: | + Fixes the people-westend chain spec. + +crates: + - name: polkadot-parachain-bin + bump: patch diff --git a/prdoc/pr_4089.prdoc b/prdoc/pr_4089.prdoc new file mode 100644 index 000000000000..29ac736ccd08 --- /dev/null +++ b/prdoc/pr_4089.prdoc @@ -0,0 +1,11 @@ +title: "pallet_broker: Support renewing leases expired in a previous period" + +doc: + - audience: Runtime User + description: | + Allow renewals of leases that ended before `start_sales` or in the first period after calling `start_sales`. 
+ This ensures that everyone has a smooth experience when migrating to coretime and the timing of + `start_sales` isn't that important. + +crates: + - name: pallet-broker diff --git a/prdoc/pr_4118.prdoc b/prdoc/pr_4118.prdoc new file mode 100644 index 000000000000..20f36c1b0a37 --- /dev/null +++ b/prdoc/pr_4118.prdoc @@ -0,0 +1,13 @@ +title: "pallet assets: minor improvement on errors returned for some calls" + +doc: + - audience: Runtime Dev + description: | + Some calls in pallet assets have better errors. No new error is introduced, only more sensible choice are made. + - audience: Runtime User + description: | + Some calls in pallet assets have better errors. No new error is introduced, only more sensible choice are made. + +crates: + - name: pallet-assets + bump: minor diff --git a/prdoc/pr_4151.prdoc b/prdoc/pr_4151.prdoc new file mode 100644 index 000000000000..70b9f5e60e14 --- /dev/null +++ b/prdoc/pr_4151.prdoc @@ -0,0 +1,11 @@ +title: "[pallet-broker] Use saturating math in input validation" + +doc: + - audience: Runtime Dev + description: | + Use saturating in the pallet-broker input validation of the `drop_history` extrinsic. This + fixes a safeguard that only expired historic instantaneous pool records get dropped. + +crates: + - name: pallet-broker + bump: patch diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 6b44b1b28dfb..1fa4b8d1202c 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -200,6 +200,11 @@ "const": "patch", "title": "Patch", "description": "A bump to the third leftmost non-zero digit of the version number." + }, + { + "const": "none", + "title": "None", + "description": "This change requires no SemVer bump (e.g. change was a test)." } ] }, diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 09fa64d58948..e6f754fa40b1 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -116,7 +116,6 @@ node-primitives = { path = "../primitives" } sc-cli = { path = "../../../client/cli", optional = true } frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } serde_json = { workspace = true, default-features = true } [dev-dependencies] @@ -172,7 +171,6 @@ node-inspect = { package = "staging-node-inspect", path = "../inspect", optional frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true } -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } sc-cli = { path = "../../../client/cli", optional = true } pallet-balances = { path = "../../../frame/balances" } sc-storage-monitor = { path = "../../../client/storage-monitor" } @@ -188,7 +186,6 @@ cli = [ "sc-service/rocksdb", "substrate-build-script-utils", "substrate-frame-cli", - "try-runtime-cli", ] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", @@ -232,7 +229,6 @@ try-runtime = [ "pallet-treasury/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", - "try-runtime-cli/try-runtime", ] [[bench]] diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 56fbed51f8a4..1d1af6e03e9e 100644 
--- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -61,11 +61,6 @@ pub enum Subcommand { #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try-runtime has migrated to a standalone CLI - /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after January 2024. - TryRuntime, - /// Key management cli utilities #[command(subcommand)] Key(sc_cli::KeySubcommand), diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index d37325c7187e..d869b77e9122 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -221,12 +221,6 @@ pub fn run() -> Result<()> { Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) }) }, - #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.into()), - #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ - You can enable it with `--features try-runtime`." - .into()), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(&config)) diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index f4ed75b91964..00eab9b75f60 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -66,6 +66,7 @@ frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } pallet-alliance = { path = "../../../frame/alliance", default-features = false } pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false } +pallet-asset-conversion-ops = { path = "../../../frame/asset-conversion/ops", default-features = false } pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false } pallet-assets = { path = "../../../frame/assets", default-features = false } pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false } @@ -166,6 +167,7 @@ std = [ "log/std", "node-primitives/std", "pallet-alliance/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-asset-rate/std", @@ -278,6 +280,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", @@ -354,6 +357,7 @@ try-runtime = [ "frame-system/try-runtime", "frame-try-runtime/try-runtime", "pallet-alliance/try-runtime", + "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-asset-rate/try-runtime", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 65d3ba0ed6ee..43c617023bcb 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -63,7 +63,7 @@ use frame_system::{ }; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; -use pallet_asset_conversion::{Ascending, Chain, WithFirstAsset}; +use 
pallet_asset_conversion::{AccountIdConverter, Ascending, Chain, WithFirstAsset}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; use pallet_election_provider_multi_phase::{GeometricDepositBase, SolutionAccuracyOf}; use pallet_identity::legacy::IdentityInfo; @@ -1602,6 +1602,7 @@ impl pallet_mmr::Config for Runtime { type Hashing = Keccak256; type LeafData = pallet_mmr::ParentNumberAndHash; type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type BlockHashProvider = pallet_mmr::DefaultBlockHashProvider; type WeightInfo = (); } @@ -1709,8 +1710,17 @@ impl pallet_asset_conversion::Config for Runtime { type Assets = UnionOf, AccountId>; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = Chain< - WithFirstAsset>, - Ascending>, + WithFirstAsset< + Native, + AccountId, + NativeOrWithId, + AccountIdConverter, + >, + Ascending< + AccountId, + NativeOrWithId, + AccountIdConverter, + >, >; type PoolAssetId = >::AssetId; type PoolAssets = PoolAssets; @@ -1727,6 +1737,19 @@ impl pallet_asset_conversion::Config for Runtime { type BenchmarkHelper = (); } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed<( + NativeOrWithId, + NativeOrWithId, + )>; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = pallet_asset_conversion_ops::weights::SubstrateWeight; +} + parameter_types! { pub const QueueCount: u32 = 300; pub const MaxQueueLen: u32 = 1000; @@ -2197,7 +2220,7 @@ impl EnsureOriginWithArg for DynamicParamet frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; return Ok(()) }, - RuntimeParametersKey::Contract(_) => { + RuntimeParametersKey::Contracts(_) => { frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; return Ok(()) }, @@ -2473,6 +2496,9 @@ mod runtime { #[runtime::pallet_index(78)] pub type PalletExampleMbms = pallet_example_mbm; + + #[runtime::pallet_index(79)] + pub type AssetConversionMigration = pallet_asset_conversion_ops; } /// The address format for describing accounts. @@ -2632,6 +2658,7 @@ mod benches { [pallet_tx_pause, TxPause] [pallet_safe_mode, SafeMode] [pallet_example_mbm, PalletExampleMbms] + [pallet_asset_conversion_ops, AssetConversionMigration] ); } diff --git a/substrate/bin/utils/subkey/src/lib.rs b/substrate/bin/utils/subkey/src/lib.rs index 33f28ef46a5e..0ca65cd08a6b 100644 --- a/substrate/bin/utils/subkey/src/lib.rs +++ b/substrate/bin/utils/subkey/src/lib.rs @@ -310,7 +310,7 @@ use clap::Parser; use sc_cli::{ - Error, GenerateCmd, GenerateNodeKeyCmd, InspectKeyCmd, InspectNodeKeyCmd, SignCmd, VanityCmd, + Error, GenerateCmd, GenerateKeyCmdCommon, InspectKeyCmd, InspectNodeKeyCmd, SignCmd, VanityCmd, VerifyCmd, }; @@ -324,7 +324,7 @@ use sc_cli::{ pub enum Subkey { /// Generate a random node key, write it to a file or stdout and write the /// corresponding peer-id to stderr - GenerateNodeKey(GenerateNodeKeyCmd), + GenerateNodeKey(GenerateKeyCmdCommon), /// Generate a random account Generate(GenerateCmd), diff --git a/substrate/client/cli/src/commands/generate_node_key.rs b/substrate/client/cli/src/commands/generate_node_key.rs index 43851dc1af5c..bdb94eec93b4 100644 --- a/substrate/client/cli/src/commands/generate_node_key.rs +++ b/substrate/client/cli/src/commands/generate_node_key.rs @@ -17,23 +17,19 @@ //! 
Implementation of the `generate-node-key` subcommand -use crate::Error; -use clap::Parser; +use crate::{build_network_key_dir_or_default, Error, NODE_KEY_ED25519_FILE}; +use clap::{Args, Parser}; use libp2p_identity::{ed25519, Keypair}; +use sc_service::BasePath; use std::{ fs, io::{self, Write}, path::PathBuf, }; -/// The `generate-node-key` command -#[derive(Debug, Parser)] -#[command( - name = "generate-node-key", - about = "Generate a random node key, write it to a file or stdout \ - and write the corresponding peer-id to stderr" -)] -pub struct GenerateNodeKeyCmd { +/// Common arguments across all generate key commands, subkey and node. +#[derive(Debug, Args, Clone)] +pub struct GenerateKeyCmdCommon { /// Name of file to save secret key to. /// If not given, the secret key is printed to stdout. #[arg(long)] @@ -45,32 +41,111 @@ pub struct GenerateNodeKeyCmd { bin: bool, } -impl GenerateNodeKeyCmd { +/// The `generate-node-key` command +#[derive(Debug, Clone, Parser)] +#[command( + name = "generate-node-key", + about = "Generate a random node key, write it to a file or stdout \ + and write the corresponding peer-id to stderr" )] +pub struct GenerateNodeKeyCmd { + #[clap(flatten)] + pub common: GenerateKeyCmdCommon, + /// Specify the chain specification. + /// + /// It can be any of the predefined chains like dev, local, staging, polkadot, kusama. + #[arg(long, value_name = "CHAIN_SPEC")] + pub chain: Option, + /// A directory where the key should be saved. If a key already + /// exists in the directory, it won't be overwritten. + #[arg(long, conflicts_with_all = ["file", "default_base_path"])] + base_path: Option, + + /// Save the key in the default directory. If a key already + /// exists in the directory, it won't be overwritten. + #[arg(long, conflicts_with_all = ["base_path", "file"])] + default_base_path: bool, +} + +impl GenerateKeyCmdCommon { /// Run the command pub fn run(&self) -> Result<(), Error> { - let keypair = ed25519::Keypair::generate(); + generate_key(&self.file, self.bin, None, &None, false, None) + } +} + +impl GenerateNodeKeyCmd { + /// Run the command + pub fn run(&self, chain_spec_id: &str, executable_name: &String) -> Result<(), Error> { + generate_key( + &self.common.file, + self.common.bin, + Some(chain_spec_id), + &self.base_path, + self.default_base_path, + Some(executable_name), + ) + } +} + +// Utility function for generating a key based on the provided CLI arguments +// +// `file` - Name of file to save the secret key to +// `bin` - Whether to write the secret key as raw binary bytes instead of hex +fn generate_key( + file: &Option, + bin: bool, + chain_spec_id: Option<&str>, + base_path: &Option, + default_base_path: bool, + executable_name: Option<&String>, +) -> Result<(), Error> { + let keypair = ed25519::Keypair::generate(); - let secret = keypair.secret(); + let secret = keypair.secret(); - let file_data = if self.bin { - secret.as_ref().to_owned() - } else { - array_bytes::bytes2hex("", secret).into_bytes() - }; + let file_data = if bin { + secret.as_ref().to_owned() + } else { + array_bytes::bytes2hex("", secret).into_bytes() + }; - match &self.file { - Some(file) => fs::write(file, file_data)?, - None => io::stdout().lock().write_all(&file_data)?, - } + match (file, base_path, default_base_path) { + (Some(file), None, false) => fs::write(file, file_data)?, + (None, Some(_), false) | (None, None, true) => { + let network_path = build_network_key_dir_or_default( + base_path.clone().map(BasePath::new), + chain_spec_id.unwrap_or_default(), + executable_name.ok_or(Error::Input("Executable name not provided".into()))?,
); - eprintln!("{}", Keypair::from(keypair).public().to_peer_id()); + fs::create_dir_all(network_path.as_path())?; - Ok(()) + let key_path = network_path.join(NODE_KEY_ED25519_FILE); + if key_path.exists() { + eprintln!("Skip generation, a key already exists in {:?}", key_path); + return Err(Error::KeyAlreadyExistsInPath(key_path)); + } else { + eprintln!("Generating key in {:?}", key_path); + fs::write(key_path, file_data)? + } + }, + (None, None, false) => io::stdout().lock().write_all(&file_data)?, + (_, _, _) => { + // This should not happen, arguments are marked as mutually exclusive. + return Err(Error::Input("Mutually exclusive arguments provided".into())); + }, } + + eprintln!("{}", Keypair::from(keypair).public().to_peer_id()); + + Ok(()) } #[cfg(test)] -mod tests { +pub mod tests { + use crate::DEFAULT_NETWORK_CONFIG_PATH; + use super::*; use std::io::Read; use tempfile::Builder; @@ -80,9 +155,32 @@ mod tests { let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); let file_path = file.path().display().to_string(); let generate = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", &file_path]); - assert!(generate.run().is_ok()); + assert!(generate.run("test", &String::from("test")).is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); assert!(array_bytes::hex2bytes(&buf).is_ok()); } + + #[test] + fn generate_node_key_base_path() { + let base_dir = Builder::new().prefix("keyfile").tempdir().unwrap(); + let key_path = base_dir + .path() + .join("chains/test_id/") + .join(DEFAULT_NETWORK_CONFIG_PATH) + .join(NODE_KEY_ED25519_FILE); + let base_path = base_dir.path().display().to_string(); + let generate = + GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--base-path", &base_path]); + assert!(generate.run("test_id", &String::from("test")).is_ok()); + let buf = fs::read_to_string(key_path.as_path()).unwrap(); + assert!(array_bytes::hex2bytes(&buf).is_ok()); + + assert!(generate.run("test_id", &String::from("test")).is_err()); + let new_buf = fs::read_to_string(key_path).unwrap(); + assert_eq!( + array_bytes::hex2bytes(&new_buf).unwrap(), + array_bytes::hex2bytes(&buf).unwrap() + ); + } } diff --git a/substrate/client/cli/src/commands/inspect_node_key.rs b/substrate/client/cli/src/commands/inspect_node_key.rs index 6cf025a2d115..25a0a685650e 100644 --- a/substrate/client/cli/src/commands/inspect_node_key.rs +++ b/substrate/client/cli/src/commands/inspect_node_key.rs @@ -79,7 +79,9 @@ impl InspectNodeKeyCmd { #[cfg(test)] mod tests { - use super::{super::GenerateNodeKeyCmd, *}; + use crate::commands::generate_node_key::GenerateNodeKeyCmd; + + use super::*; #[test] fn inspect_node_key() { @@ -87,7 +89,7 @@ mod tests { let path = path.to_str().unwrap(); let cmd = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", path]); - assert!(cmd.run().is_ok()); + assert!(cmd.run("test", &String::from("test")).is_ok()); let cmd = InspectNodeKeyCmd::parse_from(&["inspect-node-key", "--file", path]); assert!(cmd.run().is_ok()); diff --git a/substrate/client/cli/src/commands/key.rs b/substrate/client/cli/src/commands/key.rs index d49b7e4072c8..52747b404622 100644 --- a/substrate/client/cli/src/commands/key.rs +++ b/substrate/client/cli/src/commands/key.rs @@ -47,7 +47,10 @@ impl KeySubcommand { /// run the key subcommands pub fn run(&self, cli: &C) -> Result<(), Error> { match self { - KeySubcommand::GenerateNodeKey(cmd) => cmd.run(), + KeySubcommand::GenerateNodeKey(cmd) => { + let chain_spec = 
cli.load_spec(cmd.chain.as_deref().unwrap_or(""))?; + cmd.run(chain_spec.id(), &C::executable_name()) + }, KeySubcommand::Generate(cmd) => cmd.run(), KeySubcommand::Inspect(cmd) => cmd.run(), KeySubcommand::Insert(cmd) => cmd.run(cli), diff --git a/substrate/client/cli/src/commands/mod.rs b/substrate/client/cli/src/commands/mod.rs index 9d48d2bdf644..2d7a0dc72ff5 100644 --- a/substrate/client/cli/src/commands/mod.rs +++ b/substrate/client/cli/src/commands/mod.rs @@ -42,7 +42,7 @@ mod verify; pub use self::{ build_spec_cmd::BuildSpecCmd, chain_info_cmd::ChainInfoCmd, check_block_cmd::CheckBlockCmd, export_blocks_cmd::ExportBlocksCmd, export_state_cmd::ExportStateCmd, generate::GenerateCmd, - generate_node_key::GenerateNodeKeyCmd, import_blocks_cmd::ImportBlocksCmd, + generate_node_key::GenerateKeyCmdCommon, import_blocks_cmd::ImportBlocksCmd, insert_key::InsertKeyCmd, inspect_key::InspectKeyCmd, inspect_node_key::InspectNodeKeyCmd, key::KeySubcommand, purge_chain_cmd::PurgeChainCmd, revert_cmd::RevertCmd, run_cmd::RunCmd, sign::SignCmd, vanity::VanityCmd, verify::VerifyCmd, diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 5def9ce9b726..70a4885e5eef 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -428,8 +428,10 @@ pub trait CliConfiguration: Sized { /// By default this is retrieved from `NodeKeyParams` if it is available. Otherwise its /// `NodeKeyConfig::default()`. fn node_key(&self, net_config_dir: &PathBuf) -> Result { + let is_dev = self.is_dev()?; + let role = self.role(is_dev)?; self.node_key_params() - .map(|x| x.node_key(net_config_dir)) + .map(|x| x.node_key(net_config_dir, role, is_dev)) .unwrap_or_else(|| Ok(Default::default())) } @@ -463,11 +465,9 @@ pub trait CliConfiguration: Sized { let is_dev = self.is_dev()?; let chain_id = self.chain_id(is_dev)?; let chain_spec = cli.load_spec(&chain_id)?; - let base_path = self - .base_path()? - .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); - let config_dir = base_path.config_dir(chain_spec.id()); - let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); + let base_path = base_path_or_default(self.base_path()?, &C::executable_name()); + let config_dir = build_config_dir(&base_path, chain_spec.id()); + let net_config_dir = build_net_config_dir(&config_dir); let client_id = C::client_id(); let database_cache_size = self.database_cache_size()?.unwrap_or(1024); let database = self.database()?.unwrap_or( @@ -665,3 +665,33 @@ pub fn generate_node_name() -> String { } } } + +/// Returns the value of `base_path` or the default_path if it is None +pub(crate) fn base_path_or_default( + base_path: Option, + executable_name: &String, +) -> BasePath { + base_path.unwrap_or_else(|| BasePath::from_project("", "", executable_name)) +} + +/// Returns the default path for configuration directory based on the chain_spec +pub(crate) fn build_config_dir(base_path: &BasePath, chain_spec_id: &str) -> PathBuf { + base_path.config_dir(chain_spec_id) +} + +/// Returns the default path for the network configuration inside the configuration dir +pub(crate) fn build_net_config_dir(config_dir: &PathBuf) -> PathBuf { + config_dir.join(DEFAULT_NETWORK_CONFIG_PATH) +} + +/// Returns the default path for the network directory starting from the provided base_path +/// or from the default base_path. 
+pub(crate) fn build_network_key_dir_or_default( + base_path: Option, + chain_spec_id: &str, + executable_name: &String, +) -> PathBuf { + let config_dir = + build_config_dir(&base_path_or_default(base_path, executable_name), chain_spec_id); + build_net_config_dir(&config_dir) +} diff --git a/substrate/client/cli/src/error.rs b/substrate/client/cli/src/error.rs index 6c0cfca4932e..90ad048009ad 100644 --- a/substrate/client/cli/src/error.rs +++ b/substrate/client/cli/src/error.rs @@ -18,6 +18,8 @@ //! Initialization errors. +use std::path::PathBuf; + use sp_core::crypto; /// Result type alias for the CLI. @@ -78,6 +80,20 @@ pub enum Error { #[error(transparent)] GlobalLoggerError(#[from] sc_tracing::logging::Error), + + #[error( + "Starting an authority without a network key in {0}. + \n This is not a safe operation because other authorities in the network may depend on your node having a stable identity. + \n Otherwise these other authorities may not be able to reach you. + \n If it is the first time running your node, you could use one of the following methods: + \n 1. [Preferred] Separately generate the key with: key generate-node-key --base-path + \n 2. [Preferred] Separately generate the key with: key generate-node-key --file + \n 3. [Preferred] Separately generate the key with: key generate-node-key --default-base-path + \n 4. [Unsafe] Pass --unsafe-force-node-key-generation and make sure you remove it for subsequent node restarts" + )] + NetworkKeyNotFound(PathBuf), + #[error("A network key already exists in path {0}")] + KeyAlreadyExistsInPath(PathBuf), } impl From<&str> for Error { diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index 53f19f58e1fb..7058af19f1d4 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -18,15 +18,16 @@ use clap::Args; use sc_network::config::{identity::ed25519, NodeKeyConfig}; +use sc_service::Role; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; -use crate::{arg_enums::NodeKeyType, error}; +use crate::{arg_enums::NodeKeyType, error, Error}; /// The file name of the node's Ed25519 secret key inside the chain-specific /// network config directory, if neither `--node-key` nor `--node-key-file` /// is specified in combination with `--node-key-type=ed25519`. -const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; +pub(crate) const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// Parameters used to create the `NodeKeyConfig`, which determines the keypair /// used for libp2p networking. @@ -79,22 +80,48 @@ pub struct NodeKeyParams { /// the chosen type. #[arg(long, value_name = "FILE")] pub node_key_file: Option, + + /// Forces key generation if the node-key-file does not exist. + /// + /// This is an unsafe feature for production networks, because as an active authority + /// other authorities may depend on your node having a stable identity and they might + /// not be able to reach you if your identity changes after entering the active set. + /// + /// For minimal node downtime, if no custom `node-key-file` argument is provided, + /// the network key is usually persisted across node restarts + /// in the `network` folder of the directory provided in `--base-path`. + /// + /// Warning!! If you ever run the node with this argument, make sure + /// you remove it for subsequent restarts.
+ #[arg(long)] + pub unsafe_force_node_key_generation: bool, } impl NodeKeyParams { /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. - pub fn node_key(&self, net_config_dir: &PathBuf) -> error::Result { + pub fn node_key( + &self, + net_config_dir: &PathBuf, + role: Role, + is_dev: bool, + ) -> error::Result { Ok(match self.node_key_type { NodeKeyType::Ed25519 => { let secret = if let Some(node_key) = self.node_key.as_ref() { parse_ed25519_secret(node_key)? } else { - sc_network::config::Secret::File( - self.node_key_file - .clone() - .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)), - ) + let key_path = self + .node_key_file + .clone() + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); + if !self.unsafe_force_node_key_generation && + role.is_authority() && !is_dev && + !key_path.exists() + { + return Err(Error::NetworkKeyNotFound(key_path)) + } + sc_network::config::Secret::File(key_path) }; NodeKeyConfig::Ed25519(secret) @@ -122,7 +149,8 @@ mod tests { use super::*; use clap::ValueEnum; use libp2p_identity::ed25519; - use std::fs; + use std::fs::{self, File}; + use tempfile::TempDir; #[test] fn test_node_key_config_input() { @@ -136,8 +164,9 @@ mod tests { node_key_type, node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), node_key_file: None, + unsafe_force_node_key_generation: false, }; - params.node_key(net_config_dir).and_then(|c| match c { + params.node_key(net_config_dir, Role::Authority, false).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() => Ok(()), @@ -156,10 +185,11 @@ mod tests { node_key_type: NodeKeyType::Ed25519, node_key: None, node_key_file: Some(file), + unsafe_force_node_key_generation: false, }; let node_key = params - .node_key(&PathBuf::from("not-used")) + .node_key(&PathBuf::from("not-used"), Role::Authority, false) .expect("Creates node key config") .into_keypair() .expect("Creates node key pair"); @@ -186,29 +216,58 @@ mod tests { #[test] fn test_node_key_config_default() { - fn with_def_params(f: F) -> error::Result<()> + fn with_def_params(f: F, unsafe_force_node_key_generation: bool) -> error::Result<()> where F: Fn(NodeKeyParams) -> error::Result<()>, { NodeKeyType::value_variants().iter().try_for_each(|t| { let node_key_type = *t; - f(NodeKeyParams { node_key_type, node_key: None, node_key_file: None }) + f(NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: None, + unsafe_force_node_key_generation, + }) }) } - fn some_config_dir(net_config_dir: &PathBuf) -> error::Result<()> { - with_def_params(|params| { - let dir = PathBuf::from(net_config_dir.clone()); - let typ = params.node_key_type; - params.node_key(net_config_dir).and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => - Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) - }) + fn some_config_dir( + net_config_dir: &PathBuf, + unsafe_force_node_key_generation: bool, + role: Role, + is_dev: bool, + ) -> error::Result<()> { + with_def_params( + |params| { + let dir = PathBuf::from(net_config_dir.clone()); + let typ = params.node_key_type; + let role = role.clone(); + params.node_key(net_config_dir, role, is_dev).and_then(move |c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if typ == 
NodeKeyType::Ed25519 && + f == &dir.join(NODE_KEY_ED25519_FILE) => + Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) + }, + unsafe_force_node_key_generation, + ) } - assert!(some_config_dir(&PathBuf::from_str("x").unwrap()).is_ok()); + assert!(some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Full, false).is_ok()); + assert!( + some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Authority, true).is_ok() + ); + assert!( + some_config_dir(&PathBuf::from_str("x").unwrap(), true, Role::Authority, false).is_ok() + ); + assert!(matches!( + some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Authority, false), + Err(Error::NetworkKeyNotFound(_)) + )); + + let tempdir = TempDir::new().unwrap(); + let _file = File::create(tempdir.path().join(NODE_KEY_ED25519_FILE)).unwrap(); + assert!(some_config_dir(&tempdir.path().into(), false, Role::Authority, false).is_ok()); } } diff --git a/substrate/client/consensus/grandpa/src/communication/gossip.rs b/substrate/client/consensus/grandpa/src/communication/gossip.rs index 88821faf0aba..c7fe5a46a5eb 100644 --- a/substrate/client/consensus/grandpa/src/communication/gossip.rs +++ b/substrate/client/consensus/grandpa/src/communication/gossip.rs @@ -810,7 +810,7 @@ impl Inner { self.live_topics.push(round, set_id); self.peers.reshuffle(); - self.multicast_neighbor_packet(false) + self.multicast_neighbor_packet() } /// Note that a voter set with given ID has started. Does nothing if the last @@ -847,10 +847,7 @@ impl Inner { self.live_topics.push(Round(1), set_id); self.authorities = authorities; - // when transitioning to a new set we also want to send neighbor packets to light clients, - // this is so that they know who to ask justifications from in order to finalize the last - // block in the previous set. - self.multicast_neighbor_packet(true) + self.multicast_neighbor_packet() } /// Note that we've imported a commit finalizing a given block. Does nothing if the last @@ -869,7 +866,7 @@ impl Inner { return None } - self.multicast_neighbor_packet(false) + self.multicast_neighbor_packet() } fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { @@ -1183,7 +1180,7 @@ impl Inner { (neighbor_topics, action, catch_up, report) } - fn multicast_neighbor_packet(&self, force_light: bool) -> MaybeMessage { + fn multicast_neighbor_packet(&self) -> MaybeMessage { self.local_view.as_ref().map(|local_view| { let packet = NeighborPacket { round: local_view.round, @@ -1191,22 +1188,7 @@ impl Inner { commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()), }; - let peers = self - .peers - .inner - .iter() - .filter_map(|(id, info)| { - // light clients don't participate in the full GRANDPA voter protocol - // and therefore don't need to be informed about all view updates unless - // we explicitly require it (e.g. 
when transitioning to a new set) - if info.roles.is_light() && !force_light { - None - } else { - Some(id) - } - }) - .cloned() - .collect(); + let peers = self.peers.inner.iter().map(|(id, _)| id).cloned().collect(); (peers, packet) }) @@ -2602,7 +2584,7 @@ mod tests { } #[test] - fn sends_neighbor_packets_to_non_light_peers_when_starting_a_new_round() { + fn sends_neighbor_packets_to_all_peers_when_starting_a_new_round() { let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // initialize the validator to a stable set id @@ -2617,10 +2599,10 @@ mod tests { val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); val.note_round(Round(2), |peers, message| { - assert_eq!(peers.len(), 2); + assert_eq!(peers.len(), 3); assert!(peers.contains(&authority_peer)); assert!(peers.contains(&full_peer)); - assert!(!peers.contains(&light_peer)); + assert!(peers.contains(&light_peer)); assert!(matches!(message, NeighborPacket { set_id: SetId(1), round: Round(2), .. })); }); } diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 1a7a18f2ea79..fcda25907927 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -1063,7 +1063,7 @@ where let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { peer } else { - error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); + error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID {peer_id}"); return Some((hash, number)) }; diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 1dfe7d4454e9..48a4b3d6e6e1 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -393,13 +393,14 @@ where futures::executor::block_on(self.block_import.import_block(import_block)) .expect("block_import failed"); - if announce_block { - self.sync_service.announce_block(hash, None); - } hashes.push(hash); at = hash; } + if announce_block { + self.sync_service.announce_block(at, None); + } + if inform_sync_about_new_best_block { self.sync_service.new_best_block_imported( at, diff --git a/substrate/client/network/types/src/peer_id.rs b/substrate/client/network/types/src/peer_id.rs index 44d4fa99252b..14ac4a1e9aae 100644 --- a/substrate/client/network/types/src/peer_id.rs +++ b/substrate/client/network/types/src/peer_id.rs @@ -97,7 +97,7 @@ impl PeerId { /// Convert `PeerId` into ed25519 public key bytes. 
pub fn into_ed25519(&self) -> Option<[u8; 32]> { let hash = &self.multihash; - // https://www.ietf.org/id/draft-multiformats-multihash-07.html#name-the-multihash-identifier-re + // https://www.ietf.org/archive/id/draft-multiformats-multihash-07.html#name-the-multihash-identifier-re if hash.code() != 0 { // Hash is not identity return None diff --git a/substrate/client/tracing/src/logging/event_format.rs b/substrate/client/tracing/src/logging/event_format.rs index 235d66cadc78..8d875427841e 100644 --- a/substrate/client/tracing/src/logging/event_format.rs +++ b/substrate/client/tracing/src/logging/event_format.rs @@ -23,10 +23,8 @@ use std::fmt::{self, Write}; use tracing::{Event, Level, Subscriber}; use tracing_log::NormalizeEvent; use tracing_subscriber::{ - field::RecordFields, fmt::{format, time::FormatTime, FmtContext, FormatEvent, FormatFields}, - layer::Context, - registry::{LookupSpan, SpanRef}, + registry::LookupSpan, }; /// A pre-configured event formatter. @@ -54,7 +52,7 @@ where // https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 pub(crate) fn format_event_custom<'b, 'w, S, N>( &self, - ctx: CustomFmtContext<'b, S, N>, + ctx: &FmtContext<'b, S, N>, writer: format::Writer<'w>, event: &Event, ) -> fmt::Result @@ -68,7 +66,7 @@ where time::write(&self.timer, &mut format::Writer::new(&mut writer), self.enable_color)?; if self.display_level { - let fmt_level = { FmtLevel::new(meta.level(), self.enable_color) }; + let fmt_level = FmtLevel::new(meta.level(), self.enable_color); write!(writer, "{} ", fmt_level)?; } @@ -137,12 +135,12 @@ where { let mut out = String::new(); let buf_writer = format::Writer::new(&mut out); - self.format_event_custom(CustomFmtContext::FmtContext(ctx), buf_writer, event)?; + self.format_event_custom(ctx, buf_writer, event)?; writer.write_str(&out)?; print!("{}", out); Ok(()) } else { - self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) + self.format_event_custom(ctx, writer, event) } } } @@ -261,48 +259,6 @@ mod time { } } -// NOTE: `FmtContext`'s fields are private. 
This enum allows us to make a `format_event` function -// that works with `FmtContext` or `Context` with `FormatFields` -#[allow(dead_code)] -pub(crate) enum CustomFmtContext<'a, S, N> { - FmtContext(&'a FmtContext<'a, S, N>), - ContextWithFormatFields(&'a Context<'a, S>, &'a N), -} - -impl<'a, S, N> FormatFields<'a> for CustomFmtContext<'a, S, N> -where - S: Subscriber + for<'lookup> LookupSpan<'lookup>, - N: for<'writer> FormatFields<'writer> + 'static, -{ - fn format_fields(&self, writer: format::Writer<'_>, fields: R) -> fmt::Result { - match self { - CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), - CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => - fmt_fields.format_fields(writer, fields), - } - } -} - -// NOTE: the following code has been duplicated from tracing-subscriber -// -// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/fmt_layer.rs#L788 -impl<'a, S, N> CustomFmtContext<'a, S, N> -where - S: Subscriber + for<'lookup> LookupSpan<'lookup>, - N: for<'writer> FormatFields<'writer> + 'static, -{ - #[inline] - pub fn lookup_current(&self) -> Option> - where - S: for<'lookup> LookupSpan<'lookup>, - { - match self { - CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.lookup_current(), - CustomFmtContext::ContextWithFormatFields(ctx, _) => ctx.lookup_current(), - } - } -} - /// A writer which (optionally) strips out terminal control codes from the logs. /// /// This is used by [`EventFormat`] to sanitize the log messages. diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 1a8e8eea4849..cf50d7b22af9 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.20", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } @@ -40,6 +41,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-assets/std", "pallet-balances/std", "primitive-types/std", diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml new file mode 100644 index 000000000000..e421e904a3a1 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "pallet-asset-conversion-ops" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME asset conversion pallet's operations suite" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.20", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +pallet-asset-conversion = { path = "..", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] 
} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } + +[dev-dependencies] +pallet-balances = { path = "../../balances" } +pallet-assets = { path = "../../assets" } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-asset-conversion/std", + "pallet-assets/std", + "pallet-balances/std", + "primitive-types/std", + "scale-info/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-asset-conversion/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/asset-conversion/ops/src/benchmarking.rs b/substrate/frame/asset-conversion/ops/src/benchmarking.rs new file mode 100644 index 000000000000..a7370f38bc4b --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/benchmarking.rs @@ -0,0 +1,167 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Asset Conversion Ops pallet benchmarking. + +use super::*; +use crate::Pallet as AssetConversionOps; +use frame_benchmarking::{v2::*, whitelisted_caller}; +use frame_support::{ + assert_ok, + traits::fungibles::{Create, Inspect, Mutate}, +}; +use frame_system::RawOrigin as SystemOrigin; +use pallet_asset_conversion::{BenchmarkHelper, Pallet as AssetConversion}; +use sp_core::Get; +use sp_runtime::traits::One; +use sp_std::prelude::*; + +/// Provides a pair of amounts expected to serve as sufficient initial liquidity for a pool. +fn valid_liquidity_amount(ed1: T::Balance, ed2: T::Balance) -> (T::Balance, T::Balance) +where + T::Assets: Inspect, +{ + let l = + ed1.max(ed2) + T::MintMinLiquidity::get() + T::MintMinLiquidity::get() + T::Balance::one(); + (l, l) +} + +/// Create the `asset` and mint the `amount` for the `caller`. 
+fn create_asset(caller: &T::AccountId, asset: &T::AssetKind, amount: T::Balance) +where + T::Assets: Create + Mutate, +{ + if !T::Assets::asset_exists(asset.clone()) { + assert_ok!(T::Assets::create(asset.clone(), caller.clone(), true, T::Balance::one())); + } + assert_ok!(T::Assets::mint_into( + asset.clone(), + &caller, + amount + T::Assets::minimum_balance(asset.clone()) + )); +} + +/// Create the designated fee asset for pool creation. +fn create_fee_asset(caller: &T::AccountId) +where + T::Assets: Create + Mutate, +{ + let fee_asset = T::PoolSetupFeeAsset::get(); + if !T::Assets::asset_exists(fee_asset.clone()) { + assert_ok!(T::Assets::create(fee_asset.clone(), caller.clone(), true, T::Balance::one())); + } + assert_ok!(T::Assets::mint_into( + fee_asset.clone(), + &caller, + T::Assets::minimum_balance(fee_asset) + )); +} + +/// Mint the fee asset for the `caller` sufficient to cover the fee for creating a new pool. +fn mint_setup_fee_asset( + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, + lp_token: &T::PoolAssetId, +) where + T::Assets: Create + Mutate, +{ + assert_ok!(T::Assets::mint_into( + T::PoolSetupFeeAsset::get(), + &caller, + T::PoolSetupFee::get() + + T::Assets::deposit_required(asset1.clone()) + + T::Assets::deposit_required(asset2.clone()) + + T::PoolAssets::deposit_required(lp_token.clone()) + )); +} + +/// Creates a pool for a given asset pair. +/// +/// This action mints the necessary amounts of the given assets for the `caller` to provide initial +/// liquidity. It returns the LP token ID along with a pair of amounts sufficient for the pool's +/// initial liquidity. +fn create_asset_and_pool( + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, +) -> (T::PoolAssetId, T::Balance, T::Balance) +where + T::Assets: Create + Mutate, +{ + let (liquidity1, liquidity2) = valid_liquidity_amount::( + T::Assets::minimum_balance(asset1.clone()), + T::Assets::minimum_balance(asset2.clone()), + ); + create_asset::(caller, asset1, liquidity1); + create_asset::(caller, asset2, liquidity2); + let lp_token = AssetConversion::::get_next_pool_asset_id(); + + mint_setup_fee_asset::(caller, asset1, asset2, &lp_token); + + assert_ok!(AssetConversion::::create_pool( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()) + )); + + (lp_token, liquidity1, liquidity2) +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where T::Assets: Create + Mutate, T::PoolAssetId: Into,)] +mod benchmarks { + use super::*; + + #[benchmark] + fn migrate_to_new_account() { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); + + create_fee_asset::(&caller); + let (_, liquidity1, liquidity2) = create_asset_and_pool::(&caller, &asset1, &asset2); + + assert_ok!(AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), + caller.clone(), + )); + + #[extrinsic_call] + _(SystemOrigin::Signed(caller.clone()), Box::new(asset1.clone()), Box::new(asset2.clone())); + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2).unwrap(); + let (prior_account, new_account) = AssetConversionOps::::addresses(&pool_id).unwrap(); + assert_last_event::( + Event::MigratedToNewAccount { pool_id, new_account, prior_account }.into(), + ); + } + + impl_benchmark_test_suite!(AssetConversionOps, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/asset-conversion/ops/src/lib.rs b/substrate/frame/asset-conversion/ops/src/lib.rs new file mode 100644 index 000000000000..6cc7bfa2e996 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/lib.rs @@ -0,0 +1,331 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Asset Conversion Operations Suite. +//! +//! This pallet provides operational functionalities for the Asset Conversion pallet, +//! allowing you to perform various migration and one-time-use operations. These operations +//! are designed to facilitate updates and changes to the Asset Conversion pallet without +//! breaking its API. +//! +//! ## Overview +//! +//! This suite allows you to perform the following operations: +//! - Perform migration to update account ID derivation methods for existing pools. The migration +//! operation ensures that the required accounts are created, existing account deposits are +//! transferred, and liquidity is moved to the new accounts. 
+ +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; +pub use pallet::*; +pub use weights::WeightInfo; + +use frame_support::traits::{ + fungible::{Inspect as FungibleInspect, Mutate as FungibleMutate}, + fungibles::{roles::ResetTeam, Inspect, Mutate, Refund}, + tokens::{Fortitude, Precision, Preservation}, + AccountTouch, +}; +use pallet_asset_conversion::{PoolLocator, Pools}; +use sp_runtime::traits::{TryConvert, Zero}; +use sp_std::boxed::Box; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: + pallet_asset_conversion::Config< + PoolId = ( + ::AssetKind, + ::AssetKind, + ), + > + frame_system::Config + { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Type previously used to derive the account ID for a pool. Indicates that the pool's + /// liquidity assets are located at this account before the migration. + type PriorAccountIdConverter: for<'a> TryConvert< + &'a (Self::AssetKind, Self::AssetKind), + Self::AccountId, + >; + + /// Retrieves information about an existing deposit for a given account ID and asset from + /// the [`pallet_asset_conversion::Config::Assets`] registry and can initiate the refund. + type AssetsRefund: Refund< + Self::AccountId, + AssetId = Self::AssetKind, + Balance = >::Balance, + >; + + /// Retrieves information about an existing deposit for a given account ID and asset from + /// the [`pallet_asset_conversion::Config::PoolAssets`] registry and can initiate the + /// refund. + type PoolAssetsRefund: Refund< + Self::AccountId, + AssetId = Self::PoolAssetId, + Balance = >::Balance, + >; + + /// Means to reset the team for assets from the + /// [`pallet_asset_conversion::Config::PoolAssets`] registry. + type PoolAssetsTeam: ResetTeam; + + /// Registry of an asset used as an account deposit for the + /// [`pallet_asset_conversion::Config::Assets`] and + /// [`pallet_asset_conversion::Config::PoolAssets`] registries. + type DepositAsset: FungibleMutate; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + // Pallet's events. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Indicates that a pool has been migrated to the new account ID. + MigratedToNewAccount { + /// Pool's ID. + pool_id: T::PoolId, + /// Pool's prior account ID. + prior_account: T::AccountId, + /// Pool's new account ID. + new_account: T::AccountId, + }, + } + + #[pallet::error] + pub enum Error { + /// Provided asset pair is not supported for pool. + InvalidAssetPair, + /// The pool doesn't exist. + PoolNotFound, + /// Pool's balance cannot be zero. + ZeroBalance, + /// Indicates a partial transfer of balance to the new account during a migration. + PartialTransfer, + } + + /// Pallet's callable functions. + #[pallet::call] + impl Pallet { + /// Migrates an existing pool to a new account ID derivation method for a given asset pair. + /// If the migration is successful, transaction fees are refunded to the caller. + /// + /// Must be signed. 
+ #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::migrate_to_new_account())] + pub fn migrate_to_new_account( + origin: OriginFor, + asset1: Box, + asset2: Box, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + let info = Pools::::get(&pool_id).ok_or(Error::::PoolNotFound)?; + + let (prior_account, new_account) = + Self::addresses(&pool_id).ok_or(Error::::InvalidAssetPair)?; + + let (asset1, asset2) = pool_id.clone(); + + // Assets that must be transferred to the new account id. + let balance1 = T::Assets::total_balance(asset1.clone(), &prior_account); + let balance2 = T::Assets::total_balance(asset2.clone(), &prior_account); + let lp_balance = T::PoolAssets::total_balance(info.lp_token.clone(), &prior_account); + + ensure!(!balance1.is_zero(), Error::::ZeroBalance); + ensure!(!balance2.is_zero(), Error::::ZeroBalance); + ensure!(!lp_balance.is_zero(), Error::::ZeroBalance); + + // Check if a deposit needs to be placed for the new account. If so, mint the + // required deposit amount to the depositor's account to ensure the deposit can be + // provided. Once the deposit from the prior account is returned, the minted assets will + // be burned. Touching the new account is necessary because it's not possible to + // transfer assets to the new account if it's required. Additionally, the deposit cannot + // be refunded from the prior account until its balance is zero. + + let deposit_asset_ed = T::DepositAsset::minimum_balance(); + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset1.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::Assets::touch(asset1.clone(), &new_account, &depositor)?; + } + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset2.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::Assets::touch(asset2.clone(), &new_account, &depositor)?; + } + + if let Some((depositor, deposit)) = + T::PoolAssetsRefund::deposit_held(info.lp_token.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::PoolAssets::touch(info.lp_token.clone(), &new_account, &depositor)?; + } + + // Transfer all pool related assets to the new account. + + ensure!( + balance1 == + T::Assets::transfer( + asset1.clone(), + &prior_account, + &new_account, + balance1, + Preservation::Expendable, + )?, + Error::::PartialTransfer + ); + + ensure!( + balance2 == + T::Assets::transfer( + asset2.clone(), + &prior_account, + &new_account, + balance2, + Preservation::Expendable, + )?, + Error::::PartialTransfer + ); + + ensure!( + lp_balance == + T::PoolAssets::transfer( + info.lp_token.clone(), + &prior_account, + &new_account, + lp_balance, + Preservation::Expendable, + )?, + Error::::PartialTransfer + ); + + // Refund deposits from prior accounts and burn previously minted assets. 
+ + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset1.clone(), prior_account.clone()) + { + T::AssetsRefund::refund(asset1.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset2.clone(), prior_account.clone()) + { + T::AssetsRefund::refund(asset2.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + if let Some((depositor, deposit)) = + T::PoolAssetsRefund::deposit_held(info.lp_token.clone(), prior_account.clone()) + { + T::PoolAssetsRefund::refund(info.lp_token.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + T::PoolAssetsTeam::reset_team( + info.lp_token, + new_account.clone(), + new_account.clone(), + new_account.clone(), + new_account.clone(), + )?; + + Self::deposit_event(Event::MigratedToNewAccount { + pool_id, + prior_account, + new_account, + }); + + Ok(Pays::No.into()) + } + } + + impl Pallet { + /// Returns the prior and new account IDs for a given pool ID. The prior account ID comes + /// first in the tuple. + #[cfg(not(any(test, feature = "runtime-benchmarks")))] + fn addresses(pool_id: &T::PoolId) -> Option<(T::AccountId, T::AccountId)> { + match ( + T::PriorAccountIdConverter::try_convert(pool_id), + T::PoolLocator::address(pool_id), + ) { + (Ok(a), Ok(b)) if a != b => Some((a, b)), + _ => None, + } + } + + /// Returns the prior and new account IDs for a given pool ID. The prior account ID comes + /// first in the tuple. + /// + /// This function is intended for use only in test and benchmark environments. The prior + /// account ID represents the new account ID from [`Config::PoolLocator`], allowing the use + /// of the main pallet's calls to set up a pool with liquidity placed in that account and + /// migrate it to another account, which in this case is the result of + /// [`Config::PriorAccountIdConverter`]. + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub(crate) fn addresses(pool_id: &T::PoolId) -> Option<(T::AccountId, T::AccountId)> { + match ( + T::PoolLocator::address(pool_id), + T::PriorAccountIdConverter::try_convert(pool_id), + ) { + (Ok(a), Ok(b)) if a != b => Some((a, b)), + _ => None, + } + } + } +} diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs new file mode 100644 index 000000000000..9454b3a9ad44 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/mock.rs @@ -0,0 +1,147 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Asset Conversion Ops pallet. 
+ +use super::*; +use crate as pallet_asset_conversion_ops; +use core::default::Default; +use frame_support::{ + construct_runtime, derive_impl, + instances::{Instance1, Instance2}, + ord_parameter_types, parameter_types, + traits::{ + tokens::{ + fungible::{NativeFromLeft, NativeOrWithId, UnionOf}, + imbalance::ResolveAssetTo, + }, + AsEnsureOriginWithArg, ConstU32, ConstU64, + }, + PalletId, +}; +use frame_system::{EnsureSigned, EnsureSignedBy}; +use pallet_asset_conversion::{self, AccountIdConverter, AccountIdConverterNoSeed, Ascending}; +use sp_arithmetic::Permill; +use sp_runtime::{traits::AccountIdConversion, BuildStorage}; + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Assets: pallet_assets::, + PoolAssets: pallet_assets::, + AssetConversion: pallet_asset_conversion, + AssetConversionOps: pallet_asset_conversion_ops, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; +} + +#[derive_impl(pallet_assets::config_preludes::TestDefaultConfig)] +impl pallet_assets::Config for Test { + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Freezer = (); +} + +#[derive_impl(pallet_assets::config_preludes::TestDefaultConfig)] +impl pallet_assets::Config for Test { + type Currency = Balances; + type CreateOrigin = + AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Freezer = (); +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub const Native: NativeOrWithId = NativeOrWithId::Native; + pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); +} + +ord_parameter_types! 
{ + pub const AssetConversionOrigin: u64 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +pub type NativeAndAssets = UnionOf, u64>; +pub type PoolIdToAccountId = + AccountIdConverter, NativeOrWithId)>; +pub type AscendingLocator = Ascending, PoolIdToAccountId>; + +impl pallet_asset_conversion::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = ::Balance; + type HigherPrecisionBalance = sp_core::U256; + type AssetKind = NativeOrWithId; + type Assets = NativeAndAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = AscendingLocator; + type PoolAssetId = u32; + type PoolAssets = PoolAssets; + type PoolSetupFee = ConstU64<100>; + type PoolSetupFeeAsset = Native; + type PoolSetupFeeTarget = ResolveAssetTo; + type PalletId = AssetConversionPalletId; + type WeightInfo = (); + type LPFee = ConstU32<3>; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type MaxSwapPathLength = ConstU32<4>; + type MintMinLiquidity = ConstU64<100>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +pub type OldPoolIdToAccountId = + AccountIdConverterNoSeed<(NativeOrWithId, NativeOrWithId)>; + +impl pallet_asset_conversion_ops::Config for Test { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = OldPoolIdToAccountId; + type AssetsRefund = NativeAndAssets; + type PoolAssetsRefund = PoolAssets; + type PoolAssetsTeam = PoolAssets; + type DepositAsset = Balances; + type WeightInfo = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10000), (2, 20000), (3, 30000), (4, 40000)], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/substrate/frame/asset-conversion/ops/src/tests.rs b/substrate/frame/asset-conversion/ops/src/tests.rs new file mode 100644 index 000000000000..84bbe6336747 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/tests.rs @@ -0,0 +1,308 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Asset Conversion Ops pallet tests. + +use crate::{mock::*, *}; +use frame_support::{ + assert_noop, assert_ok, + traits::{ + fungible::{Inspect as FungibleInspect, NativeOrWithId}, + fungibles::{Create, Inspect}, + Incrementable, + }, +}; + +#[test] +fn migrate_pool_account_id_with_native() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
+ assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = Balances::minimum_balance(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000 + ed); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000 + ed); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_pool_account_id_with_insufficient_assets() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::WithId(1); + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
+ assert_ok!(NativeAndAssets::create(token_1.clone(), user, false, 1)); + assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 20000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_pool_account_id_with_sufficient_assets() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::WithId(1); + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
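+		// Same flow as the insufficient-assets test above, except that both assets are created
+		// as sufficient (`is_sufficient = true`) below.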
+ assert_ok!(NativeAndAssets::create(token_1.clone(), user, true, 1)); + assert_ok!(NativeAndAssets::create(token_2.clone(), user, true, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 20000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_empty_pool_account_id() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + // setup pool and provide some liquidity. + assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + // migrate. 
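+		// The pool exists but was never funded, so there is nothing to move to the new account
+		// and the call is expected to fail with `ZeroBalance`.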
+ assert_noop!( + AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + ), + Error::::ZeroBalance + ); + }); +} diff --git a/substrate/frame/asset-conversion/ops/src/weights.rs b/substrate/frame/asset-conversion/ops/src/weights.rs new file mode 100644 index 000000000000..9e7379c50156 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/weights.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/production/substrate-node +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion_ops +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/asset-conversion-ops/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_ops`. +pub trait WeightInfo { + fn migrate_to_new_account() -> Weight; +} + +/// Weights for `pallet_asset_conversion_ops` using the Substrate node and recommended hardware. 
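+///
+/// A hypothetical runtime wiring (the `Runtime` name is illustrative):
+/// ```ignore
+/// impl pallet_asset_conversion_ops::Config for Runtime {
+///     // ...
+///     type WeightInfo = pallet_asset_conversion_ops::weights::SubstrateWeight<Runtime>;
+/// }
+/// ```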
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:4 w:4) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1762` + // Estimated: `11426` + // Minimum execution time: 223_850_000 picoseconds. + Weight::from_parts(231_676_000, 11426) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:4 w:4) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1762` + // Estimated: `11426` + // Minimum execution time: 223_850_000 picoseconds. + Weight::from_parts(231_676_000, 11426) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().writes(11_u64)) + } +} diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 4591b87c1867..477866e0051b 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -137,8 +137,11 @@ ord_parameter_types! 
{ } pub type NativeAndAssets = UnionOf, u128>; -pub type AscendingLocator = Ascending>; -pub type WithFirstAssetLocator = WithFirstAsset>; +pub type PoolIdToAccountId = + AccountIdConverter, NativeOrWithId)>; +pub type AscendingLocator = Ascending, PoolIdToAccountId>; +pub type WithFirstAssetLocator = + WithFirstAsset, PoolIdToAccountId>; impl Config for Test { type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/asset-conversion/src/types.rs b/substrate/frame/asset-conversion/src/types.rs index 5ee81c2012de..27c0e8e68805 100644 --- a/substrate/frame/asset-conversion/src/types.rs +++ b/substrate/frame/asset-conversion/src/types.rs @@ -19,6 +19,7 @@ use super::*; use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use scale_info::TypeInfo; +use sp_runtime::traits::TryConvert; /// Represents a swap path with associated asset amounts indicating how much of the asset needs to /// be deposited to get the following asset's amount withdrawn (this is inclusive of fees). @@ -68,49 +69,59 @@ pub trait PoolLocator { /// /// The `PoolId` is represented as a tuple of `AssetKind`s with `FirstAsset` always positioned as /// the first element. -pub struct WithFirstAsset( - PhantomData<(FirstAsset, AccountId, AssetKind)>, +pub struct WithFirstAsset( + PhantomData<(FirstAsset, AccountId, AssetKind, AccountIdConverter)>, ); -impl PoolLocator - for WithFirstAsset +impl + PoolLocator + for WithFirstAsset where AssetKind: Eq + Clone + Encode, AccountId: Decode, FirstAsset: Get, + AccountIdConverter: for<'a> TryConvert<&'a (AssetKind, AssetKind), AccountId>, { fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { + if asset1 == asset2 { + return Err(()); + } let first = FirstAsset::get(); - match true { - _ if asset1 == asset2 => Err(()), - _ if first == *asset1 => Ok((first, asset2.clone())), - _ if first == *asset2 => Ok((first, asset1.clone())), - _ => Err(()), + if first == *asset1 { + Ok((first, asset2.clone())) + } else if first == *asset2 { + Ok((first, asset1.clone())) + } else { + Err(()) } } fn address(id: &(AssetKind, AssetKind)) -> Result { - let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); - Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) + AccountIdConverter::try_convert(id).map_err(|_| ()) } } /// Pool locator where the `PoolId` is a tuple of `AssetKind`s arranged in ascending order. 
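+///
+/// For example, with `u32` asset kinds, `pool_id(&7, &3)` yields `Ok((3, 7))`, while
+/// `pool_id(&3, &3)` yields `Err(())` because a pool over a single asset is not allowed.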
-pub struct Ascending(PhantomData<(AccountId, AssetKind)>); -impl PoolLocator - for Ascending +pub struct Ascending( + PhantomData<(AccountId, AssetKind, AccountIdConverter)>, +); +impl + PoolLocator + for Ascending where AssetKind: Ord + Clone + Encode, AccountId: Decode, + AccountIdConverter: for<'a> TryConvert<&'a (AssetKind, AssetKind), AccountId>, { fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { - match true { - _ if asset1 > asset2 => Ok((asset2.clone(), asset1.clone())), - _ if asset1 < asset2 => Ok((asset1.clone(), asset2.clone())), - _ => Err(()), + if asset1 > asset2 { + Ok((asset2.clone(), asset1.clone())) + } else if asset1 < asset2 { + Ok((asset1.clone(), asset2.clone())) + } else { + Err(()) } } fn address(id: &(AssetKind, AssetKind)) -> Result { - let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); - Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) + AccountIdConverter::try_convert(id).map_err(|_| ()) } } @@ -131,3 +142,30 @@ where First::address(id).or(Second::address(id)) } } + +/// `PoolId` to `AccountId` conversion. +pub struct AccountIdConverter(PhantomData<(Seed, PoolId)>); +impl TryConvert<&PoolId, AccountId> for AccountIdConverter +where + PoolId: Encode, + AccountId: Decode, + Seed: Get, +{ + fn try_convert(id: &PoolId) -> Result { + sp_io::hashing::blake2_256(&Encode::encode(&(Seed::get(), id))[..]) + .using_encoded(|e| Decode::decode(&mut TrailingZeroInput::new(e)).map_err(|_| id)) + } +} + +/// `PoolId` to `AccountId` conversion without an addition arguments to the seed. +pub struct AccountIdConverterNoSeed(PhantomData); +impl TryConvert<&PoolId, AccountId> for AccountIdConverterNoSeed +where + PoolId: Encode, + AccountId: Decode, +{ + fn try_convert(id: &PoolId) -> Result { + sp_io::hashing::blake2_256(&Encode::encode(id)[..]) + .using_encoded(|e| Decode::decode(&mut TrailingZeroInput::new(e)).map_err(|_| id)) + } +} diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 3b95750c14c8..ed6df77e1523 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-assets" -version = "29.0.0" +version = "29.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index 8791aaa736b3..9309d0101175 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -368,11 +368,14 @@ impl, I: 'static> Pallet { Ok(()) } - /// Returns a `DepositFrom` of an account only if balance is zero. + /// Refunds the `DepositFrom` of an account only if its balance is zero. + /// + /// If the `maybe_check_caller` parameter is specified, it must match the account that provided + /// the deposit or must be the admin of the asset. 
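+	/// If `maybe_check_caller` is `None`, the permission check is skipped entirely; this is how
+	/// the `fungibles::Refund` implementation invokes it.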
pub(super) fn do_refund_other( id: T::AssetId, who: &T::AccountId, - caller: &T::AccountId, + maybe_check_caller: Option, ) -> DispatchResult { let mut account = Account::::get(&id, &who).ok_or(Error::::NoDeposit)?; let (depositor, deposit) = @@ -380,7 +383,9 @@ impl, I: 'static> Pallet { let mut details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(!account.status.is_frozen(), Error::::Frozen); - ensure!(caller == &depositor || caller == &details.admin, Error::::NoPermission); + if let Some(caller) = maybe_check_caller { + ensure!(caller == depositor || caller == details.admin, Error::::NoPermission); + } ensure!(account.balance.is_zero(), Error::::WouldBurn); T::Currency::unreserve(&depositor, deposit); @@ -491,7 +496,7 @@ impl, I: 'static> Pallet { let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, - Error::::AssetNotLive + Error::::IncorrectStatus ); let actual = Self::decrease_balance(id.clone(), target, amount, f, |actual, details| { @@ -1013,4 +1018,28 @@ impl, I: 'static> Pallet { }) .collect::>() } + + /// Reset the team for the asset with the given `id`. + /// + /// ### Parameters + /// - `id`: The identifier of the asset for which the team is being reset. + /// - `owner`: The new `owner` account for the asset. + /// - `admin`: The new `admin` account for the asset. + /// - `issuer`: The new `issuer` account for the asset. + /// - `freezer`: The new `freezer` account for the asset. + pub(crate) fn do_reset_team( + id: T::AssetId, + owner: T::AccountId, + admin: T::AccountId, + issuer: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + let mut d = Asset::::get(&id).ok_or(Error::::Unknown)?; + d.owner = owner; + d.admin = admin; + d.issuer = issuer; + d.freezer = freezer; + Asset::::insert(&id, d); + Ok(()) + } } diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs index 123abeba8283..9f837a604341 100644 --- a/substrate/frame/assets/src/impl_fungibles.rs +++ b/substrate/frame/assets/src/impl_fungibles.rs @@ -308,3 +308,35 @@ impl, I: 'static> fungibles::InspectEnumerable for Pa Asset::::iter_keys() } } + +impl, I: 'static> fungibles::roles::ResetTeam for Pallet { + fn reset_team( + id: T::AssetId, + owner: T::AccountId, + admin: T::AccountId, + issuer: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + Self::do_reset_team(id, owner, admin, issuer, freezer) + } +} + +impl, I: 'static> fungibles::Refund for Pallet { + type AssetId = T::AssetId; + type Balance = DepositBalanceOf; + fn deposit_held(id: Self::AssetId, who: T::AccountId) -> Option<(T::AccountId, Self::Balance)> { + use ExistenceReason::*; + match Account::::get(&id, &who).ok_or(Error::::NoDeposit).ok()?.reason { + DepositHeld(b) => Some((who, b)), + DepositFrom(d, b) => Some((d, b)), + _ => None, + } + } + fn refund(id: Self::AssetId, who: T::AccountId) -> DispatchResult { + match Self::deposit_held(id.clone(), who.clone()) { + Some((d, _)) if d == who => Self::do_refund(id, who, false), + Some(..) 
=> Self::do_refund_other(id, &who, None), + None => Err(Error::::NoDeposit.into()), + } + } +} diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 6891f04dfb51..37073b280655 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -987,7 +987,7 @@ pub mod pallet { let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, - Error::::AssetNotLive + Error::::IncorrectStatus ); ensure!(origin == d.freezer, Error::::NoPermission); let who = T::Lookup::lookup(who)?; @@ -1024,7 +1024,7 @@ pub mod pallet { let details = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( details.status == AssetStatus::Live || details.status == AssetStatus::Frozen, - Error::::AssetNotLive + Error::::IncorrectStatus ); ensure!(origin == details.admin, Error::::NoPermission); let who = T::Lookup::lookup(who)?; @@ -1113,7 +1113,7 @@ pub mod pallet { Asset::::try_mutate(id.clone(), |maybe_details| { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(details.status == AssetStatus::Live, Error::::LiveAsset); + ensure!(details.status == AssetStatus::Live, Error::::AssetNotLive); ensure!(origin == details.owner, Error::::NoPermission); if details.owner == owner { return Ok(()) @@ -1644,7 +1644,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; let id: T::AssetId = id.into(); - Self::do_refund_other(id, &who, &origin) + Self::do_refund_other(id, &who, Some(origin)) } /// Disallow further unprivileged transfers of an asset `id` to and from an account `who`. @@ -1669,7 +1669,7 @@ pub mod pallet { let d = Asset::::get(&id).ok_or(Error::::Unknown)?; ensure!( d.status == AssetStatus::Live || d.status == AssetStatus::Frozen, - Error::::AssetNotLive + Error::::IncorrectStatus ); ensure!(origin == d.freezer, Error::::NoPermission); let who = T::Lookup::lookup(who)?; diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs index 9d1ece7a1d8e..d59c219d3e71 100644 --- a/substrate/frame/beefy-mmr/src/mock.rs +++ b/substrate/frame/beefy-mmr/src/mock.rs @@ -90,6 +90,8 @@ impl pallet_mmr::Config for Test { type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type BlockHashProvider = pallet_mmr::DefaultBlockHashProvider; + type WeightInfo = (); } diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 969f13e269de..ce8d41530451 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -15,6 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } @@ -29,6 +30,7 @@ frame-system = { path = "../system", default-features = false } [dev-dependencies] sp-io = { path = "../../primitives/io" } +pretty_assertions = "1.3.0" [features] default = ["std"] @@ -39,6 +41,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-api/std", "sp-arithmetic/std", diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 98ac074ca917..1fc1c3a101ab 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ 
-313,8 +313,8 @@ mod benches { assert_last_event::( Event::Transferred { region_id: region, - old_owner: caller, - owner: recipient, + old_owner: Some(caller), + owner: Some(recipient), duration: 3u32.into(), } .into(), diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index ef20bc8fb80b..cb7393cc9e32 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -81,7 +81,8 @@ impl Pallet { last_timeslice: Self::current_timeslice(), }; let now = frame_system::Pallet::::block_number(); - let new_sale = SaleInfoRecord { + // Imaginary old sale for bootstrapping the first actual sale: + let old_sale = SaleInfoRecord { sale_start: now, leadin_length: Zero::zero(), price, @@ -94,7 +95,7 @@ impl Pallet { cores_sold: 0, }; Self::deposit_event(Event::::SalesStarted { price, core_count }); - Self::rotate_sale(new_sale, &config, &status); + Self::rotate_sale(old_sale, &config, &status); Status::::put(&status); Ok(()) } @@ -119,7 +120,8 @@ impl Pallet { sale.sellout_price = Some(price); } SaleInfo::::put(&sale); - let id = Self::issue(core, sale.region_begin, sale.region_end, who.clone(), Some(price)); + let id = + Self::issue(core, sale.region_begin, sale.region_end, Some(who.clone()), Some(price)); let duration = sale.region_end.saturating_sub(sale.region_begin); Self::deposit_event(Event::Purchased { who, region_id: id, price, duration }); Ok(id) @@ -177,11 +179,11 @@ impl Pallet { let mut region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } let old_owner = region.owner; - region.owner = new_owner; + region.owner = Some(new_owner); Regions::::insert(®ion_id, ®ion); let duration = region.end.saturating_sub(region_id.begin); Self::deposit_event(Event::Transferred { @@ -202,7 +204,7 @@ impl Pallet { let mut region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } let pivot = region_id.begin.saturating_add(pivot_offset); ensure!(pivot < region.end, Error::::PivotTooLate); @@ -226,7 +228,7 @@ impl Pallet { let region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } ensure!((pivot & !region_id.mask).is_void(), Error::::ExteriorPivot); @@ -418,7 +420,10 @@ impl Pallet { pub(crate) fn do_drop_history(when: Timeslice) -> DispatchResult { let config = Configuration::::get().ok_or(Error::::Uninitialized)?; let status = Status::::get().ok_or(Error::::Uninitialized)?; - ensure!(status.last_timeslice > when + config.contribution_timeout, Error::::StillValid); + ensure!( + status.last_timeslice > when.saturating_add(config.contribution_timeout), + Error::::StillValid + ); let record = InstaPoolHistory::::take(when).ok_or(Error::::NoHistory)?; if let Some(payout) = record.maybe_payout { let _ = Self::charge(&Self::account_id(), payout); diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index a39576b09013..1ef9e59f0186 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -36,6 +36,7 @@ mod 
tick_impls; mod types; mod utility_impls; +pub mod migration; pub mod runtime_api; pub mod weights; @@ -46,6 +47,9 @@ pub use core_mask::*; pub use coretime_interface::*; pub use types::*; +/// The log target for this pallet. +const LOG_TARGET: &str = "runtime::broker"; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -61,7 +65,10 @@ pub mod pallet { use sp_runtime::traits::{Convert, ConvertBack}; use sp_std::vec::Vec; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -216,9 +223,9 @@ pub mod pallet { /// The duration of the Region. duration: Timeslice, /// The old owner of the Region. - old_owner: T::AccountId, + old_owner: Option, /// The new owner of the Region. - owner: T::AccountId, + owner: Option, }, /// A Region has been split into two non-overlapping Regions. Partitioned { @@ -552,16 +559,27 @@ pub mod pallet { /// /// - `origin`: Must be Root or pass `AdminOrigin`. /// - `initial_price`: The price of Bulk Coretime in the first sale. - /// - `core_count`: The number of cores which can be allocated. + /// - `total_core_count`: This is the total number of cores the relay chain should have + /// after the sale concludes. + /// + /// NOTE: This function does not actually request that new core count from the relay chain. + /// You need to make sure to call `request_core_count` afterwards to bring the relay chain + /// in sync. + /// + /// When to call the function depends on the new core count. If it is larger than what it + /// was before, you can call it immediately or even before `start_sales` as non allocated + /// cores will just be `Idle`. If you are actually reducing the number of cores, you should + /// call `request_core_count`, right before the next sale, to avoid shutting down tasks too + /// early. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::start_sales((*core_count).into()))] + #[pallet::weight(T::WeightInfo::start_sales((*total_core_count).into()))] pub fn start_sales( origin: OriginFor, initial_price: BalanceOf, - core_count: CoreIndex, + total_core_count: CoreIndex, ) -> DispatchResultWithPostInfo { T::AdminOrigin::ensure_origin_or_root(origin)?; - Self::do_start_sales(initial_price, core_count)?; + Self::do_start_sales(initial_price, total_core_count)?; Ok(Pays::No.into()) } diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs new file mode 100644 index 000000000000..95aa28250a62 --- /dev/null +++ b/substrate/frame/broker/src/migration.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
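+//! Storage migrations for the broker pallet.
+//!
+//! A possible way to wire the migration into a runtime (the `Runtime` name is illustrative):
+//! ```ignore
+//! /// Migrations applied by `Executive` on runtime upgrade.
+//! pub type Migrations = (pallet_broker::migration::MigrateV0ToV1<Runtime>,);
+//! ```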
+ +use super::*; +use crate::types::RegionRecord; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; +use sp_runtime::Saturating; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +mod v1 { + use super::*; + + /// V0 region record. + #[derive(Encode, Decode)] + struct RegionRecordV0 { + /// The end of the Region. + pub end: Timeslice, + /// The owner of the Region. + pub owner: AccountId, + /// The amount paid to Polkadot for this Region, or `None` if renewal is not allowed. + pub paid: Option, + } + + pub struct MigrateToV1Impl(PhantomData); + + impl UncheckedOnRuntimeUpgrade for MigrateToV1Impl { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut count: u64 = 0; + + >::translate::>, _>(|_, v0| { + count.saturating_inc(); + Some(RegionRecord { end: v0.end, owner: Some(v0.owner), paid: v0.paid }) + }); + + log::info!( + target: LOG_TARGET, + "Storage migration v1 for pallet-broker finished.", + ); + + // calculate and return migration weights + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok((Regions::::iter_keys().count() as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let old_count = u32::decode(&mut &state[..]).expect("Known good"); + let new_count = Regions::::iter_values().count() as u32; + + ensure!(old_count == new_count, "Regions count should not change"); + Ok(()) + } + } +} + +/// Migrate the pallet storage from `0` to `1`. +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + v1::MigrateToV1Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index c7205058c972..6219b4eff1b4 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -29,7 +29,7 @@ use frame_support::{ }; use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_arithmetic::Perbill; -use sp_core::{ConstU32, ConstU64}; +use sp_core::{ConstU32, ConstU64, Get}; use sp_runtime::{ traits::{BlockNumberProvider, Identity}, BuildStorage, Saturating, @@ -210,6 +210,15 @@ pub fn advance_to(b: u64) { } } +pub fn advance_sale_period() { + let sale = SaleInfo::::get().unwrap(); + + let target_block_number = + sale.region_begin as u64 * <::TimeslicePeriod as Get>::get(); + + advance_to(target_block_number) +} + pub fn pot() -> u64 { balance(Broker::account_id()) } diff --git a/substrate/frame/broker/src/nonfungible_impl.rs b/substrate/frame/broker/src/nonfungible_impl.rs index b2e88bf09a0e..80dcc175df53 100644 --- a/substrate/frame/broker/src/nonfungible_impl.rs +++ b/substrate/frame/broker/src/nonfungible_impl.rs @@ -25,12 +25,13 @@ use sp_std::vec::Vec; impl Inspect for Pallet { type ItemId = u128; - fn owner(index: &Self::ItemId) -> Option { - Regions::::get(RegionId::from(*index)).map(|r| r.owner) + fn owner(item: &Self::ItemId) -> Option { + let record = Regions::::get(RegionId::from(*item))?; + record.owner } - fn attribute(index: &Self::ItemId, key: &[u8]) -> Option> { - let id = RegionId::from(*index); + fn attribute(item: &Self::ItemId, key: &[u8]) -> Option> { + let id = RegionId::from(*item); let item = Regions::::get(id)?; match key { b"begin" => Some(id.begin.encode()), @@ -46,11 +47,49 @@ impl Inspect for Pallet { } 
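+// `Self::ItemId` is the `u128` encoding of a `RegionId` (see its `From`/`Into` impls), so every
+// NFT instance maps one-to-one to a region. Note that `transfer` passes `None` as the owner
+// check, i.e. it is an unchecked transfer; callers are presumably expected to have verified
+// ownership beforehand.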
 impl<T: Config> Transfer<T::AccountId> for Pallet<T> {
-	fn transfer(index: &Self::ItemId, dest: &T::AccountId) -> DispatchResult {
-		Self::do_transfer((*index).into(), None, dest.clone()).map_err(Into::into)
+	fn transfer(item: &Self::ItemId, dest: &T::AccountId) -> DispatchResult {
+		Self::do_transfer((*item).into(), None, dest.clone()).map_err(Into::into)
 	}
 }
-// We don't allow any of the mutate operations, so the default implementation is used, which will
-// return `TokenError::Unsupported` in case any of the operations is called.
-impl<T: Config> Mutate<T::AccountId> for Pallet<T> {}
+/// We don't really support burning and minting.
+///
+/// We only need this to allow the region to be reserve transferable.
+///
+/// For reserve transfers that are not 'local', the asset must first be withdrawn to the holding
+/// register and then deposited into the designated account. This process necessitates that the
+/// asset is capable of being 'burned' and 'minted'.
+///
+/// Since each region is associated with specific record data, we will not actually burn the asset.
+/// If we did, we wouldn't know what record to assign to the newly minted region. Therefore, instead
+/// of burning, we set the asset's owner to `None`. In essence, 'burning' a region involves setting
+/// its owner to `None`, whereas 'minting' the region assigns its owner to an actual account. This
+/// way we never lose track of the associated record data.
+impl<T: Config> Mutate<T::AccountId> for Pallet<T> {
+	/// Deposit a region into an account.
+	fn mint_into(item: &Self::ItemId, who: &T::AccountId) -> DispatchResult {
+		let region_id: RegionId = (*item).into();
+		let record = Regions::<T>::get(&region_id).ok_or(Error::<T>::UnknownRegion)?;
+
+		// 'Minting' can only occur if the asset has previously been burned (i.e. moved to the
+		// holding register).
+		ensure!(record.owner.is_none(), Error::<T>::NotAllowed);
+		Self::issue(region_id.core, region_id.begin, record.end, Some(who.clone()), record.paid);
+
+		Ok(())
+	}
+
+	/// Withdraw a region from an account.
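+	/// If `maybe_check_owner` is `Some`, it must match the region's current owner for the call
+	/// to succeed.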
+ fn burn(item: &Self::ItemId, maybe_check_owner: Option<&T::AccountId>) -> DispatchResult { + let region_id: RegionId = (*item).into(); + let mut record = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; + if let Some(owner) = maybe_check_owner { + ensure!(Some(owner.clone()) == record.owner, Error::::NotOwner); + } + + record.owner = None; + Regions::::insert(region_id, record); + + Ok(()) + } +} diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index 0aacd7ef3696..c573b6c55a20 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -24,6 +24,7 @@ use frame_support::{ BoundedVec, }; use frame_system::RawOrigin::Root; +use pretty_assertions::assert_eq; use sp_runtime::{traits::Get, TokenError}; use CoreAssignment::*; use CoretimeTraceItem::*; @@ -145,6 +146,7 @@ fn drop_history_works() { advance_to(16); assert_eq!(InstaPoolHistory::::iter().count(), 6); advance_to(17); + assert_noop!(Broker::do_drop_history(u32::MAX), Error::::StillValid); assert_noop!(Broker::do_drop_history(region.begin), Error::::StillValid); advance_to(18); assert_eq!(InstaPoolHistory::::iter().count(), 6); @@ -198,14 +200,35 @@ fn transfer_works() { } #[test] -fn mutate_operations_unsupported_for_regions() { - TestExt::new().execute_with(|| { +fn mutate_operations_work() { + TestExt::new().endow(1, 1000).execute_with(|| { let region_id = RegionId { begin: 0, core: 0, mask: CoreMask::complete() }; assert_noop!( >::mint_into(®ion_id.into(), &2), - TokenError::Unsupported + Error::::UnknownRegion ); - assert_noop!(>::burn(®ion_id.into(), None), TokenError::Unsupported); + + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + let region_id = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_noop!( + >::mint_into(®ion_id.into(), &2), + Error::::NotAllowed + ); + + assert_noop!( + >::burn(®ion_id.into(), Some(&2)), + Error::::NotOwner + ); + // 'withdraw' the region from user 1: + assert_ok!(>::burn(®ion_id.into(), Some(&1))); + assert_eq!(Regions::::get(region_id).unwrap().owner, None); + + // `mint_into` works after burning: + assert_ok!(>::mint_into(®ion_id.into(), &2)); + assert_eq!(Regions::::get(region_id).unwrap().owner, Some(2)); + + // Unsupported operations: assert_noop!( >::set_attribute(®ion_id.into(), &[], &[]), TokenError::Unsupported @@ -283,7 +306,7 @@ fn nft_metadata_works() { assert_eq!(attribute::(region, b"begin"), 4); assert_eq!(attribute::(region, b"length"), 3); assert_eq!(attribute::(region, b"end"), 7); - assert_eq!(attribute::(region, b"owner"), 1); + assert_eq!(attribute::>(region, b"owner"), Some(1)); assert_eq!(attribute::(region, b"part"), 0xfffff_fffff_fffff_fffff.into()); assert_eq!(attribute::(region, b"core"), 0); assert_eq!(attribute::>(region, b"paid"), Some(100)); @@ -295,7 +318,7 @@ fn nft_metadata_works() { assert_eq!(attribute::(region, b"begin"), 6); assert_eq!(attribute::(region, b"length"), 1); assert_eq!(attribute::(region, b"end"), 7); - assert_eq!(attribute::(region, b"owner"), 42); + assert_eq!(attribute::>(region, b"owner"), Some(42)); assert_eq!(attribute::(region, b"part"), 0x00000_fffff_fffff_00000.into()); assert_eq!(attribute::(region, b"core"), 0); assert_eq!(attribute::>(region, b"paid"), None); @@ -891,6 +914,161 @@ fn short_leases_are_cleaned() { }); } +#[test] +fn leases_can_be_renewed() { + TestExt::new().endow(1, 1000).execute_with(|| { + // Timeslice period is 2. + // + // Sale 1 starts at block 7, Sale 2 starts at 13. 
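+		// (With the test configuration's region length of 3 timeslices, consecutive sales are
+		// roughly 3 * 2 = 6 blocks apart, which is what `advance_sale_period()` skips ahead by.)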
+ + // Set lease to expire in sale 1 and start sales. + assert_ok!(Broker::do_set_lease(2001, 9)); + assert_eq!(Leases::::get().len(), 1); + // Start the sales with only one core for this lease. + assert_ok!(Broker::do_start_sales(100, 1)); + + // Advance to sale period 1, we should get an AllowedRenewal for task 2001 for the next + // sale. + advance_sale_period(); + assert_eq!( + AllowedRenewals::::get(AllowedRenewalId { core: 0, when: 10 }), + Some(AllowedRenewalRecord { + price: 100, + completion: CompletionStatus::Complete( + vec![ScheduleItem { mask: CoreMask::complete(), assignment: Task(2001) }] + .try_into() + .unwrap() + ) + }) + ); + // And the lease has been removed from storage. + assert_eq!(Leases::::get().len(), 0); + + // Advance to sale period 2, where we can renew. + advance_sale_period(); + assert_ok!(Broker::do_renew(1, 0)); + // We renew for the base price of the previous sale period. + assert_eq!(balance(1), 900); + + // We just renewed for this period. + advance_sale_period(); + // Now we are off core and the core is pooled. + advance_sale_period(); + // Check the trace agrees. + assert_eq!( + CoretimeTrace::get(), + vec![ + // Period 0 gets no assign core, but leases are on-core. + // Period 1: + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(CoreAssignment::Task(2001), 57600)], + end_hint: None, + }, + ), + // Period 2 - expiring at the end of this period, so we called renew. + ( + 12, + AssignCore { + core: 0, + begin: 14, + assignment: vec![(CoreAssignment::Task(2001), 57600)], + end_hint: None, + }, + ), + // Period 3 - we get assigned a core because we called renew in period 2. + ( + 18, + AssignCore { + core: 0, + begin: 20, + assignment: vec![(CoreAssignment::Task(2001), 57600)], + end_hint: None, + }, + ), + // Period 4 - we don't get a core as we didn't call renew again. + // This core is recycled into the pool. + ( + 24, + AssignCore { + core: 0, + begin: 26, + assignment: vec![(CoreAssignment::Pool, 57600)], + end_hint: None, + }, + ), + ] + ); + }); +} + +// We understand that this does not work as intended for leases that expire within `region_length` +// timeslices after calling `start_sales`. +#[test] +fn short_leases_cannot_be_renewed() { + TestExt::new().endow(1, 1000).execute_with(|| { + // Timeslice period is 2. + // + // Sale 1 starts at block 7, Sale 2 starts at 13. + + // Set lease to expire in sale period 0 and start sales. + assert_ok!(Broker::do_set_lease(2001, 3)); + assert_eq!(Leases::::get().len(), 1); + // Start the sales with one core for this lease. + assert_ok!(Broker::do_start_sales(100, 1)); + + // The lease is removed. + assert_eq!(Leases::::get().len(), 0); + + // We should have got an entry in AllowedRenewals, but we don't because rotate_sale + // schedules leases a period in advance. This renewal should be in the period after next + // because while bootstrapping our way into the sale periods, we give everything a lease for + // period 1, so they can renew for period 2. So we have a core until the end of period 1, + // but we are not marked as able to renew because we expired before sale period 1 starts. + // + // This should be fixed. + assert_eq!(AllowedRenewals::::get(AllowedRenewalId { core: 0, when: 10 }), None); + // And the lease has been removed from storage. + assert_eq!(Leases::::get().len(), 0); + + // Advance to sale period 2, where we now cannot renew. + advance_to(13); + assert_noop!(Broker::do_renew(1, 0), Error::::NotAllowed); + + // Check the trace. 
+ assert_eq!( + CoretimeTrace::get(), + vec![ + // Period 0 gets no assign core, but leases are on-core. + // Period 1 we get assigned a core due to the way the sales are bootstrapped. + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(CoreAssignment::Task(2001), 57600)], + end_hint: None, + }, + ), + // Period 2 - we don't get a core as we couldn't renew. + // This core is recycled into the pool. + ( + 12, + AssignCore { + core: 0, + begin: 14, + assignment: vec![(CoreAssignment::Pool, 57600)], + end_hint: None, + }, + ), + ] + ); + }); +} + #[test] fn leases_are_limited() { TestExt::new().execute_with(|| { @@ -1092,3 +1270,141 @@ fn config_works() { assert_noop!(Broker::configure(Root.into(), cfg), Error::::InvalidConfig); }); } + +/// Ensure that a lease that ended before `start_sales` was called can be renewed. +#[test] +fn renewal_works_leases_ended_before_start_sales() { + TestExt::new().endow(1, 1000).execute_with(|| { + let config = Configuration::::get().unwrap(); + + // This lease is ended before `start_stales` was called. + assert_ok!(Broker::do_set_lease(1, 1)); + + // Go to some block to ensure that the lease of task 1 already ended. + advance_to(5); + + // This lease will end three sale periods in. + assert_ok!(Broker::do_set_lease( + 2, + Broker::latest_timeslice_ready_to_commit(&config) + config.region_length * 3 + )); + + // This intializes the first sale and the period 0. + assert_ok!(Broker::do_start_sales(100, 2)); + assert_noop!(Broker::do_renew(1, 1), Error::::Unavailable); + assert_noop!(Broker::do_renew(1, 0), Error::::Unavailable); + + // Lease for task 1 should have been dropped. + assert!(Leases::::get().iter().any(|l| l.task == 2)); + + // This intializes the second and the period 1. + advance_sale_period(); + + // Now we can finally renew the core 0 of task 1. + let new_core = Broker::do_renew(1, 0).unwrap(); + // Renewing the active lease doesn't work. + assert_noop!(Broker::do_renew(1, 1), Error::::SoldOut); + assert_eq!(balance(1), 900); + + // This intializes the third sale and the period 2. + advance_sale_period(); + let new_core = Broker::do_renew(1, new_core).unwrap(); + + // Renewing the active lease doesn't work. + assert_noop!(Broker::do_renew(1, 0), Error::::SoldOut); + assert_eq!(balance(1), 800); + + // All leases should have ended + assert!(Leases::::get().is_empty()); + + // This intializes the fourth sale and the period 3. + advance_sale_period(); + + // Renew again + assert_eq!(0, Broker::do_renew(1, new_core).unwrap()); + // Renew the task 2. + assert_eq!(1, Broker::do_renew(1, 0).unwrap()); + assert_eq!(balance(1), 600); + + // This intializes the fifth sale and the period 4. 
+ advance_sale_period(); + + assert_eq!( + CoretimeTrace::get(), + vec![ + ( + 10, + AssignCore { + core: 0, + begin: 12, + assignment: vec![(Task(1), 57600)], + end_hint: None + } + ), + ( + 10, + AssignCore { + core: 1, + begin: 12, + assignment: vec![(Task(2), 57600)], + end_hint: None + } + ), + ( + 16, + AssignCore { + core: 0, + begin: 18, + assignment: vec![(Task(2), 57600)], + end_hint: None + } + ), + ( + 16, + AssignCore { + core: 1, + begin: 18, + assignment: vec![(Task(1), 57600)], + end_hint: None + } + ), + ( + 22, + AssignCore { + core: 0, + begin: 24, + assignment: vec![(Task(2), 57600)], + end_hint: None, + }, + ), + ( + 22, + AssignCore { + core: 1, + begin: 24, + assignment: vec![(Task(1), 57600)], + end_hint: None, + }, + ), + ( + 28, + AssignCore { + core: 0, + begin: 30, + assignment: vec![(Task(1), 57600)], + end_hint: None, + }, + ), + ( + 28, + AssignCore { + core: 1, + begin: 30, + assignment: vec![(Task(2), 57600)], + end_hint: None, + }, + ), + ] + ); + }); +} diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 388370bce4d4..04e9a65bf8f6 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -216,11 +216,10 @@ impl Pallet { let assignment = CoreAssignment::Task(task); let schedule = BoundedVec::truncate_from(vec![ScheduleItem { mask, assignment }]); Workplan::::insert((region_begin, first_core), &schedule); - // Separate these to avoid missed expired leases hanging around forever. - let expired = until < region_end; - let expiring = until >= region_begin && expired; - if expiring { - // last time for this one - make it renewable. + // Will the lease expire at the end of the period? + let expire = until < region_end; + if expire { + // last time for this one - make it renewable in the next sale. let renewal_id = AllowedRenewalId { core: first_core, when: region_end }; let record = AllowedRenewalRecord { price, completion: Complete(schedule) }; AllowedRenewals::::insert(renewal_id, &record); @@ -232,8 +231,10 @@ impl Pallet { }); Self::deposit_event(Event::LeaseEnding { when: region_end, task }); } + first_core.saturating_inc(); - !expired + + !expire }); Leases::::put(&leases); diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index e8119d29ef5d..f2cae9a41ad4 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -84,7 +84,7 @@ pub struct RegionRecord { /// The end of the Region. pub end: Timeslice, /// The owner of the Region. - pub owner: AccountId, + pub owner: Option, /// The amount paid to Polkadot for this Region, or `None` if renewal is not allowed. 
pub paid: Option, } diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 3dba5be5b398..4163817a8b58 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -72,11 +72,11 @@ impl Pallet { Ok(()) } - pub(crate) fn issue( + pub fn issue( core: CoreIndex, begin: Timeslice, end: Timeslice, - owner: T::AccountId, + owner: Option, paid: Option>, ) -> RegionId { let id = RegionId { begin, core, mask: CoreMask::complete() }; @@ -94,7 +94,7 @@ impl Pallet { let region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } Regions::::remove(®ion_id); diff --git a/substrate/frame/contracts/fixtures/contracts/return_with_data.rs b/substrate/frame/contracts/fixtures/contracts/return_with_data.rs index 5340f86fbfc5..26f74edba5d0 100644 --- a/substrate/frame/contracts/fixtures/contracts/return_with_data.rs +++ b/substrate/frame/contracts/fixtures/contracts/return_with_data.rs @@ -38,6 +38,10 @@ pub extern "C" fn call() { output: [u8], ); + // Burn some PoV, clear_storage consumes some PoV as in order to clear the storage we need to we + // need to read its size first. + api::clear_storage_v1(b""); + let exit_status = uapi::ReturnFlags::from_bits(exit_status[0] as u32).unwrap(); api::return_value(exit_status, output); } diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index 3c06131dd608..bf3c00b3ff1f 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -15,87 +15,19 @@ // along with Polkadot. If not, see . use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; -use crate::{ - parachain, - parachain::RuntimeHoldReason, - primitives::{Balance, CENTS}, -}; -use frame_support::{ - parameter_types, - traits::{ConstBool, ConstU32, Contains, Randomness}, - weights::Weight, -}; -use frame_system::{pallet_prelude::BlockNumberFor, EnsureSigned}; -use pallet_xcm::BalanceOf; -use sp_runtime::{traits::Convert, Perbill}; - -pub const fn deposit(items: u32, bytes: u32) -> Balance { - items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS -} +use crate::parachain::RuntimeHoldReason; +use frame_support::{derive_impl, parameter_types}; parameter_types! 
{ - pub const DepositPerItem: Balance = deposit(1, 0); - pub const DepositPerByte: Balance = deposit(0, 1); - pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); pub Schedule: pallet_contracts::Schedule = Default::default(); - pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); - pub const MaxDelegateDependencies: u32 = 32; -} - -pub struct DummyRandomness(sp_std::marker::PhantomData); - -impl Randomness> for DummyRandomness { - fn random(_subject: &[u8]) -> (T::Hash, BlockNumberFor) { - (Default::default(), Default::default()) - } -} - -impl Convert> for Runtime { - fn convert(w: Weight) -> BalanceOf { - w.ref_time().into() - } -} - -#[derive(Clone, Default)] -pub struct Filters; - -impl Contains for Filters { - fn contains(call: &RuntimeCall) -> bool { - match call { - parachain::RuntimeCall::Contracts(_) => true, - _ => false, - } - } } +#[derive_impl(pallet_contracts::config_preludes::TestDefaultConfig)] impl pallet_contracts::Config for Runtime { type AddressGenerator = pallet_contracts::DefaultAddressGenerator; - type CallFilter = Filters; type CallStack = [pallet_contracts::Frame; 5]; - type ChainExtension = (); - type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Currency = Balances; - type DefaultDepositLimit = DefaultDepositLimit; - type DepositPerByte = DepositPerByte; - type DepositPerItem = DepositPerItem; - type MaxCodeLen = ConstU32<{ 123 * 1024 }>; - type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; - type MaxDelegateDependencies = MaxDelegateDependencies; - type MaxStorageKeyLen = ConstU32<128>; - type Migrations = (); - type Randomness = DummyRandomness; - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type RuntimeHoldReason = RuntimeHoldReason; type Schedule = Schedule; type Time = super::Timestamp; - type UnsafeUnstableInterface = ConstBool; - type UploadOrigin = EnsureSigned; - type InstantiateOrigin = EnsureSigned; - type WeightInfo = (); - type WeightPrice = Self; - type Debug = (); - type Environment = (); - type ApiVersion = (); type Xcm = pallet_xcm::Pallet; } diff --git a/substrate/frame/contracts/src/benchmarking/call_builder.rs b/substrate/frame/contracts/src/benchmarking/call_builder.rs new file mode 100644 index 000000000000..285fe0052b4d --- /dev/null +++ b/substrate/frame/contracts/src/benchmarking/call_builder.rs @@ -0,0 +1,170 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + benchmarking::{Contract, WasmModule}, + exec::Stack, + storage::meter::Meter, + wasm::Runtime, + BalanceOf, Config, DebugBufferVec, Determinism, ExecReturnValue, GasMeter, Origin, Schedule, + TypeInfo, WasmBlob, Weight, +}; +use codec::{Encode, HasCompact}; +use core::fmt::Debug; +use sp_core::Get; +use sp_std::prelude::*; + +type StackExt<'a, T> = Stack<'a, T, WasmBlob>; + +/// A prepared contract call ready to be executed. 
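+///
+/// A rough usage sketch, mirroring what the `call_builder!` macro below expands to:
+/// ```ignore
+/// let mut setup = CallSetup::<T>::new(WasmModule::dummy());
+/// let (mut ext, module) = setup.ext();
+/// let prepared = CallSetup::<T>::prepare_call(&mut ext, module, vec![]);
+/// let _result = prepared.call();
+/// ```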
+pub struct PreparedCall<'a, T: Config> { + func: wasmi::Func, + store: wasmi::Store>>, +} + +impl<'a, T: Config> PreparedCall<'a, T> { + pub fn call(mut self) -> ExecReturnValue { + let result = self.func.call(&mut self.store, &[], &mut []); + WasmBlob::::process_result(self.store, result).unwrap() + } +} + +/// A builder used to prepare a contract call. +pub struct CallSetup { + contract: Contract, + dest: T::AccountId, + origin: Origin, + gas_meter: GasMeter, + storage_meter: Meter, + schedule: Schedule, + value: BalanceOf, + debug_message: Option>, + determinism: Determinism, + data: Vec, +} + +impl CallSetup +where + T: Config + pallet_balances::Config, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, +{ + /// Setup a new call for the given module. + pub fn new(module: WasmModule) -> Self { + let contract = Contract::::new(module.clone(), vec![]).unwrap(); + let dest = contract.account_id.clone(); + let origin = Origin::from_account_id(contract.caller.clone()); + + let storage_meter = Meter::new(&origin, None, 0u32.into()).unwrap(); + + Self { + contract, + dest, + origin, + gas_meter: GasMeter::new(Weight::MAX), + storage_meter, + schedule: T::Schedule::get(), + value: 0u32.into(), + debug_message: None, + determinism: Determinism::Enforced, + data: vec![], + } + } + + /// Set the meter's storage deposit limit. + pub fn set_storage_deposit_limit(&mut self, balance: BalanceOf) { + self.storage_meter = Meter::new(&self.origin, Some(balance), 0u32.into()).unwrap(); + } + + /// Set the call's origin. + pub fn set_origin(&mut self, origin: Origin) { + self.origin = origin; + } + + /// Set the contract's balance. + pub fn set_balance(&mut self, value: BalanceOf) { + self.contract.set_balance(value); + } + + /// Set the call's input data. + pub fn set_data(&mut self, value: Vec) { + self.data = value; + } + + /// Set the debug message. + pub fn enable_debug_message(&mut self) { + self.debug_message = Some(Default::default()); + } + + /// Get the debug message. + pub fn debug_message(&self) -> Option> { + self.debug_message.clone() + } + + /// Get the call's input data. + pub fn data(&self) -> Vec { + self.data.clone() + } + + /// Get the call's contract. + pub fn contract(&self) -> Contract { + self.contract.clone() + } + + /// Build the call stack. + pub fn ext(&mut self) -> (StackExt<'_, T>, WasmBlob) { + StackExt::bench_new_call( + self.dest.clone(), + self.origin.clone(), + &mut self.gas_meter, + &mut self.storage_meter, + &self.schedule, + self.value, + self.debug_message.as_mut(), + self.determinism, + ) + } + + /// Prepare a call to the module. + pub fn prepare_call<'a>( + ext: &'a mut StackExt<'a, T>, + module: WasmBlob, + input: Vec, + ) -> PreparedCall<'a, T> { + let (func, store) = module.bench_prepare_call(ext, input); + PreparedCall { func, store } + } +} + +#[macro_export] +macro_rules! 
call_builder( + ($func: ident, $module:expr) => { + $crate::call_builder!($func, _contract, $module); + }; + ($func: ident, $contract: ident, $module:expr) => { + let mut setup = CallSetup::::new($module); + $crate::call_builder!($func, $contract, setup: setup); + }; + ($func:ident, setup: $setup: ident) => { + $crate::call_builder!($func, _contract, setup: $setup); + }; + ($func:ident, $contract: ident, setup: $setup: ident) => { + let data = $setup.data(); + let $contract = $setup.contract(); + let (mut ext, module) = $setup.ext(); + let $func = CallSetup::::prepare_call(&mut ext, module, data); + }; +); diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 094edee96e94..952ef180be21 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -18,9 +18,11 @@ //! Benchmarks for the contracts pallet #![cfg(feature = "runtime-benchmarks")] +mod call_builder; mod code; mod sandbox; use self::{ + call_builder::CallSetup, code::{ body::{self, DynInstr::*}, DataSegment, ImportedFunction, ImportedMemory, Location, ModuleDefinition, WasmModule, @@ -30,7 +32,7 @@ use self::{ use crate::{ exec::Key, migration::{ - codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, MigrationStep, + codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, v16, MigrationStep, }, Pallet as Contracts, *, }; @@ -63,6 +65,7 @@ const API_BENCHMARK_RUNS: u32 = 1600; const INSTR_BENCHMARK_RUNS: u32 = 5000; /// An instantiated and deployed contract. +#[derive(Clone)] struct Contract { caller: T::AccountId, account_id: T::AccountId, @@ -328,6 +331,26 @@ mod benchmarks { Ok(()) } + // This benchmarks the v16 migration step (Remove ED from base_deposit). + #[benchmark(pov_mode = Measured)] + fn v16_migration_step() -> Result<(), BenchmarkError> { + let contract = + >::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; + + let info = contract.info()?; + let base_deposit = v16::store_old_contract_info::(contract.account_id.clone(), &info); + let mut m = v16::Migration::::default(); + + #[block] + { + m.step(&mut WeightMeter::new()); + } + let ed = Pallet::::min_balance(); + let info = v16::ContractInfoOf::::get(&contract.account_id).unwrap(); + assert_eq!(info.storage_base_deposit, base_deposit - ed); + Ok(()) + } + // This benchmarks the weight of executing Migration::migrate to execute a noop migration. 
#[benchmark(pov_mode = Measured)] fn migration_noop() { @@ -598,17 +621,19 @@ mod benchmarks { } #[benchmark(pov_mode = Measured)] - fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::getter("seal0", "seal_caller", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_caller", r)); - Ok(()) + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_is_contract(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_is_contract(r: Linear<0, API_BENCHMARK_RUNS>) { let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); @@ -631,21 +656,23 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let info = instance.info()?; + call_builder!(func, instance, code); + let info = instance.info().unwrap(); // every account would be a contract (worst case) for acc in accounts.iter() { >::insert(acc, info.clone()); } - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); @@ -676,30 +703,35 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let info = instance.info()?; + call_builder!(func, instance, code); + let info = instance.info().unwrap(); // every account would be a contract (worst case) for acc in accounts.iter() { >::insert(acc, info.clone()); } - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::getter("seal0", "seal_own_code_hash", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_own_code_hash", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_origin(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_caller_is_origin(r: Linear<0, API_BENCHMARK_RUNS>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), 
imported_functions: vec![ImportedFunction { @@ -711,15 +743,18 @@ mod benchmarks { call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_root(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_caller_is_root(r: Linear<0, API_BENCHMARK_RUNS>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -731,81 +766,104 @@ mod benchmarks { call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Root; - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + let mut setup = CallSetup::::new(code); + setup.set_origin(Origin::Root); + call_builder!(func, setup: setup); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::getter("seal0", "seal_address", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_address", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_gas_left(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::getter("seal1", "gas_left", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_gas_left(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal1", "gas_left", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::getter("seal0", "seal_balance", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_balance", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_value_transferred(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::getter("seal0", "seal_value_transferred", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_value_transferred(r: 
Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_value_transferred", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::getter("seal0", "seal_minimum_balance", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_minimum_balance", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::getter("seal0", "seal_block_number", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_block_number", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::getter("seal0", "seal_now", r), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::getter("seal0", "seal_now", r)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_weight_to_fee(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_weight_to_fee(r: Linear<0, API_BENCHMARK_RUNS>) { let pages = code::max_pages::(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), @@ -831,15 +889,18 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -859,11 +920,15 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -903,7 +968,7 @@ mod benchmarks { // as precise as with other APIs. 
Because this function can only be called once per // contract it cannot be used as an attack vector. #[benchmark(pov_mode = Measured)] - fn seal_return(r: Linear<0, 1>) -> Result<(), BenchmarkError> { + fn seal_return(r: Linear<0, 1>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -923,17 +988,18 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_return_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { + fn seal_return_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -951,11 +1017,14 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // The same argument as for `seal_return` is true here. @@ -1060,7 +1129,7 @@ mod benchmarks { // number (< 1 KB). Therefore we are not overcharging too much in case a smaller subject is // used. #[benchmark(pov_mode = Measured)] - fn seal_random(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_random(r: Linear<0, API_BENCHMARK_RUNS>) { let pages = code::max_pages::(); let subject_len = T::Schedule::get().limits.subject_len; assert!(subject_len < 1024); @@ -1088,17 +1157,21 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Overhead of calling the function without any topic. // We benchmark for the worst case (largest event). #[benchmark(pov_mode = Measured)] - fn seal_deposit_event(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_deposit_event(r: Linear<0, API_BENCHMARK_RUNS>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1119,11 +1192,15 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Benchmark the overhead that topics generate. 
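Where a benchmark needs a non-default environment, the pattern used in the hunks above and below is to build the `CallSetup` explicitly, adjust it, and only then hand it to the macro. A hedged sketch using the setters introduced in call_builder.rs (the module and values here are illustrative):

    let mut setup = CallSetup::<T>::new(WasmModule::dummy());
    setup.set_origin(Origin::Root);        // e.g. for seal_caller_is_root
    setup.set_balance(1_000u32.into());    // fund the contract for transfer benchmarks
    setup.set_data(vec![42; 1024]);        // input handed to the wasm entry point
    setup.enable_debug_message();          // capture debug output, as in the debug_message benchmarks
    call_builder!(func, setup: setup);

    let res = func.call();
    assert!(!res.did_revert());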
@@ -1133,7 +1210,7 @@ mod benchmarks { fn seal_deposit_event_per_topic_and_byte( t: Linear<0, { T::Schedule::get().limits.event_topics }>, n: Linear<0, { T::Schedule::get().limits.payload_len }>, - ) -> Result<(), BenchmarkError> { + ) { let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); let topics_len = topics.len(); let code = WasmModule::::from(ModuleDefinition { @@ -1155,11 +1232,15 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Benchmark debug_message call with zero input data. @@ -1186,23 +1267,16 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + let mut setup = CallSetup::::new(code); + setup.enable_debug_message(); + call_builder!(func, setup: setup); + let res; #[block] { - >::bare_call( - instance.caller, - instance.account_id, - 0u32.into(), - Weight::MAX, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result?; + res = func.call(); } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1246,22 +1320,17 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + let mut setup = CallSetup::::new(code); + setup.enable_debug_message(); + call_builder!(func, setup: setup); + + let res; #[block] { - >::bare_call( - instance.caller, - instance.account_id, - 0u32.into(), - Weight::MAX, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result?; + res = func.call(); } + assert_eq!(res.did_revert(), false); + assert_eq!(setup.debug_message().unwrap().len() as u32, i); Ok(()) } @@ -1309,7 +1378,8 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + + call_builder!(func, instance, code); let info = instance.info()?; for key in keys { info.write( @@ -1320,9 +1390,13 @@ mod benchmarks { ) .map_err(|_| "Failed to write to storage during setup.")?; } - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1352,7 +1426,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1361,9 +1435,13 @@ mod benchmarks { false, ) .map_err(|_| "Failed to write to storage during setup.")?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1394,7 +1472,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1403,9 +1481,13 @@ mod benchmarks { false, ) .map_err(|_| "Failed to write to storage during setup.")?; - let origin = 
RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1444,7 +1526,7 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; for key in keys { info.write( @@ -1456,9 +1538,13 @@ mod benchmarks { .map_err(|_| "Failed to write to storage during setup.")?; } >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1486,7 +1572,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1495,9 +1581,13 @@ mod benchmarks { false, ) .map_err(|_| "Failed to write to storage during setup.")?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1542,7 +1632,7 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; for key in keys { info.write( @@ -1554,9 +1644,13 @@ mod benchmarks { .map_err(|_| "Failed to write to storage during setup.")?; } >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1592,7 +1686,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1602,9 +1696,14 @@ mod benchmarks { ) .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); + Ok(()) } @@ -1642,7 +1741,7 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; for key in keys { info.write( @@ -1654,9 +1753,13 @@ mod benchmarks { .map_err(|_| "Failed to write to storage during setup.")?; } >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1684,7 +1787,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( 
&Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1694,9 +1797,13 @@ mod benchmarks { ) .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1740,7 +1847,7 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; for key in keys { info.write( @@ -1752,9 +1859,13 @@ mod benchmarks { .map_err(|_| "Failed to write to storage during setup.")?; } >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1790,7 +1901,7 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; + call_builder!(func, instance, code); let info = instance.info()?; info.write( &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, @@ -1800,9 +1911,13 @@ mod benchmarks { ) .map_err(|_| "Failed to write to storage during setup.")?; >::insert(&instance.account_id, info); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1842,14 +1957,20 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - instance.set_balance(value * (r + 1).into()); - let origin = RawOrigin::Signed(instance.caller.clone()); + let mut setup = CallSetup::::new(code); + setup.set_balance(value * (r + 1).into()); + call_builder!(func, setup: setup); + for account in &accounts { assert_eq!(T::Currency::total_balance(account), 0u32.into()); } - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); for account in &accounts { assert_eq!(T::Currency::total_balance(account), value); @@ -1923,17 +2044,16 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call( - origin, - instance.addr, - 0u32.into(), - Weight::MAX, - Some(BalanceOf::::from(u32::MAX.into()).into()), - vec![], - ); + let mut setup = CallSetup::::new(code); + setup.set_storage_deposit_limit(BalanceOf::::from(u32::MAX.into())); + call_builder!(func, setup: setup); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -1984,11 +2104,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let callee = instance.addr.clone(); - let origin = RawOrigin::Signed(instance.caller); - #[extrinsic_call] - call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2037,11 +2160,16 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let 
origin = RawOrigin::Signed(instance.caller.clone()); - let bytes = vec![42; c as usize]; - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, bytes); + let mut setup = CallSetup::::new(code); + setup.set_data(vec![42; c as usize]); + call_builder!(func, setup: setup); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2133,10 +2261,9 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - instance.set_balance((value + Pallet::::min_balance()) * (r + 1).into()); - let origin = RawOrigin::Signed(instance.caller.clone()); - let callee = instance.addr.clone(); + let mut setup = CallSetup::::new(code); + setup.set_balance((value + Pallet::::min_balance()) * (r + 1).into()); + call_builder!(func, instance, setup: setup); let addresses = hashes .iter() .map(|hash| Contracts::::contract_address(&instance.account_id, hash, &[], &[])) @@ -2147,8 +2274,13 @@ mod benchmarks { return Err("Expected that contract does not exist at this point.".into()); } } - #[extrinsic_call] - call(origin, callee, 0u32.into(), Weight::MAX, None, vec![]); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); for addr in &addresses { ContractInfoOf::::get(&addr).ok_or("Contract should have been instantiated")?; } @@ -2217,106 +2349,121 @@ mod benchmarks { ])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - instance.set_balance(value + (Pallet::::min_balance() * 2u32.into())); - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + let mut setup = CallSetup::::new(code); + setup.set_balance(value + (Pallet::::min_balance() * 2u32.into())); + call_builder!(func, setup: setup); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } // Only the overhead of calling the function itself with minimal arguments. #[benchmark(pov_mode = Measured)] - fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::hasher("seal_hash_sha2_256", r, 0), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", r, 0)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_sha2_256_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let instance = Contract::::new(WasmModule::hasher("seal_hash_sha2_256", 1, n), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_sha2_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", 1, n)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Only the overhead of calling the function itself with minimal arguments. 
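For context on the two benchmark shapes used for the hashing host functions here: the `seal_hash_*` benchmarks scale the number of calls `r` over an empty input, while the `seal_hash_*_per_byte` variants fix a single call and scale the input length `n`; the runtime then charges the sum of both fitted components. A hedged sketch of how such a charge combines (placeholder numbers, not the generated weight code):

    // Illustrative only: fixed per-call cost plus a per-byte cost for the hashed input.
    fn hash_charge(per_call: u64, per_byte: u64, input_len: u64) -> u64 {
        per_call + per_byte * input_len
    }

    assert_eq!(hash_charge(3_000, 12, 256), 3_000 + 12 * 256);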
#[benchmark(pov_mode = Measured)] - fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_keccak_256", r, 0), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", r, 0)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_keccak_256_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_keccak_256", 1, n), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_keccak_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", 1, n)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Only the overhead of calling the function itself with minimal arguments. #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_blake2_256", r, 0), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", r, 0)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_256_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_blake2_256", 1, n), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_blake2_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", 1, n)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // Only the overhead of calling the function itself with minimal arguments. 
#[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_blake2_128", r, 0), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) { + call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", r, 0)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_128_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let instance = - Contract::::new(WasmModule::hasher("seal_hash_blake2_128", 1, n), vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + fn seal_hash_blake2_128_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", 1, n)); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // `n`: Message input length to verify in bytes. @@ -2359,10 +2506,14 @@ mod benchmarks { ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2415,10 +2566,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2464,10 +2619,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2503,10 +2662,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2543,10 +2706,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2584,10 +2751,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - 
call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2641,10 +2812,14 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } @@ -2697,15 +2872,19 @@ mod benchmarks { )), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); Ok(()) } #[benchmark(pov_mode = Measured)] - fn seal_instantiation_nonce(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { + fn seal_instantiation_nonce(r: Linear<0, API_BENCHMARK_RUNS>) { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -2717,11 +2896,14 @@ mod benchmarks { call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), ..Default::default() }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]); - Ok(()) + call_builder!(func, code); + + let res; + #[block] + { + res = func.call(); + } + assert_eq!(res.did_revert(), false); } // We load `i64` values from random linear memory locations and store the loaded diff --git a/substrate/frame/contracts/src/benchmarking/sandbox.rs b/substrate/frame/contracts/src/benchmarking/sandbox.rs index c3abbcad5f2b..308bf6873e49 100644 --- a/substrate/frame/contracts/src/benchmarking/sandbox.rs +++ b/substrate/frame/contracts/src/benchmarking/sandbox.rs @@ -20,7 +20,8 @@ /// ! environment that provides the seal interface as imported functions. use super::{code::WasmModule, Config}; use crate::wasm::{ - AllowDeprecatedInterface, AllowUnstableInterface, Determinism, Environment, WasmBlob, + AllowDeprecatedInterface, AllowUnstableInterface, Determinism, Environment, LoadedModule, + LoadingMode, WasmBlob, }; use sp_core::Get; use wasmi::{errors::LinkerError, Func, Linker, StackLimits, Store}; @@ -42,12 +43,18 @@ impl From<&WasmModule> for Sandbox { /// Creates an instance from the supplied module. /// Sets the execution engine fuel level to `u64::MAX`. 
fn from(module: &WasmModule) -> Self { - let (mut store, _memory, instance) = WasmBlob::::instantiate::( + let contract = LoadedModule::new::( &module.code, + Determinism::Relaxed, + Some(StackLimits::default()), + LoadingMode::Checked, + ) + .expect("Failed to load Wasm module"); + + let (mut store, _memory, instance) = WasmBlob::::instantiate::( + contract, (), &::Schedule::get(), - Determinism::Relaxed, - StackLimits::default(), // We are testing with an empty environment anyways AllowDeprecatedInterface::No, ) diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 41a0383811fd..31cdadb4bb43 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -733,6 +733,30 @@ where stack.run(executable, input_data).map(|ret| (account_id, ret)) } + #[cfg(feature = "runtime-benchmarks")] + pub fn bench_new_call( + dest: T::AccountId, + origin: Origin, + gas_meter: &'a mut GasMeter, + storage_meter: &'a mut storage::meter::Meter, + schedule: &'a Schedule, + value: BalanceOf, + debug_message: Option<&'a mut DebugBufferVec>, + determinism: Determinism, + ) -> (Self, E) { + Self::new( + FrameArgs::Call { dest, cached_info: None, delegated_call: None }, + origin, + gas_meter, + storage_meter, + schedule, + value, + debug_message, + determinism, + ) + .unwrap() + } + /// Create a new call stack. fn new( args: FrameArgs, diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 9a1a0aada894..edc4c872bfce 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -244,13 +244,13 @@ pub mod pallet { use sp_runtime::Perbill; /// The in-code storage version. - pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); + pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); - #[pallet::config] + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The time implementation used to supply timestamps to contracts through `seal_now`. type Time: Time; @@ -263,22 +263,30 @@ pub mod pallet { /// be instantiated from existing codes that use this deprecated functionality. It will /// be removed eventually. Hence for new `pallet-contracts` deployments it is okay /// to supply a dummy implementation for this type (because it is never used). + #[pallet::no_default_bounds] type Randomness: Randomness>; /// The fungible in which fees are paid and contract balances are held. + #[pallet::no_default] type Currency: Inspect + Mutate + MutateHold; /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The overarching call type. + #[pallet::no_default_bounds] type RuntimeCall: Dispatchable + GetDispatchInfo + codec::Decode + IsType<::RuntimeCall>; + /// Overarching hold reason. + #[pallet::no_default_bounds] + type RuntimeHoldReason: From; + /// Filter that is applied to calls dispatched by contracts. /// /// Use this filter to control which dispatchables are callable by contracts. @@ -301,10 +309,12 @@ pub mod pallet { /// /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact /// calls, you must configure them separately within the XCM pallet itself. + #[pallet::no_default_bounds] type CallFilter: Contains<::RuntimeCall>; /// Used to answer contracts' queries regarding the current weight price. 
This is **not** /// used to calculate the actual fee and is only for informational purposes. + #[pallet::no_default_bounds] type WeightPrice: Convert>; /// Describes the weights of the dispatchables of this module and is also used to @@ -312,10 +322,12 @@ pub mod pallet { type WeightInfo: WeightInfo; /// Type that allows the runtime authors to add new host functions for a contract to call. + #[pallet::no_default_bounds] type ChainExtension: chain_extension::ChainExtension + Default; /// Cost schedule and limits. #[pallet::constant] + #[pallet::no_default] type Schedule: Get>; /// The type of the call stack determines the maximum nesting depth of contract calls. @@ -326,6 +338,7 @@ pub mod pallet { /// /// This setting along with [`MaxCodeLen`](#associatedtype.MaxCodeLen) directly affects /// memory usage of your runtime. + #[pallet::no_default] type CallStack: Array>; /// The amount of balance a caller has to pay for each byte of storage. @@ -334,10 +347,12 @@ pub mod pallet { /// /// Changing this value for an existing chain might need a storage migration. #[pallet::constant] + #[pallet::no_default_bounds] type DepositPerByte: Get>; /// Fallback value to limit the storage deposit if it's not being set by the caller. #[pallet::constant] + #[pallet::no_default_bounds] type DefaultDepositLimit: Get>; /// The amount of balance a caller has to pay for each storage item. @@ -346,6 +361,7 @@ pub mod pallet { /// /// Changing this value for an existing chain might need a storage migration. #[pallet::constant] + #[pallet::no_default_bounds] type DepositPerItem: Get>; /// The percentage of the storage deposit that should be held for using a code hash. @@ -356,6 +372,7 @@ pub mod pallet { type CodeHashLockupDepositPercent: Get; /// The address generator used to generate the addresses of contracts. + #[pallet::no_default_bounds] type AddressGenerator: AddressGenerator; /// The maximum length of a contract code in bytes. @@ -395,6 +412,7 @@ pub mod pallet { /// /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to upload contract /// code. + #[pallet::no_default_bounds] type UploadOrigin: EnsureOrigin; /// Origin allowed to instantiate code. @@ -407,11 +425,9 @@ pub mod pallet { /// /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to instantiate /// contract code. + #[pallet::no_default_bounds] type InstantiateOrigin: EnsureOrigin; - /// Overarching hold reason. - type RuntimeHoldReason: From; - /// The sequence of migration steps that will be applied during a migration. /// /// # Examples @@ -435,6 +451,7 @@ pub mod pallet { /// For most production chains, it's recommended to use the `()` implementation of this /// trait. This implementation offers additional logging when the log target /// "runtime::contracts" is set to trace. + #[pallet::no_default_bounds] type Debug: Debugger; /// Type that bundles together all the runtime configurable interface types. @@ -442,16 +459,19 @@ pub mod pallet { /// This is not a real config. We just mention the type here as constant so that /// its type appears in the metadata. Only valid value is `()`. #[pallet::constant] + #[pallet::no_default_bounds] type Environment: Get>; /// The version of the HostFn APIs that are available in the runtime. /// /// Only valid value is `()`. #[pallet::constant] + #[pallet::no_default_bounds] type ApiVersion: Get; /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and /// execute XCM programs. 
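With `#[pallet::config(with_default)]` on the trait and the `config_preludes` module introduced just below, a mock or test runtime only has to spell out the associated types marked `#[pallet::no_default]` (plus anything it deliberately overrides); everything else can be pulled in with `derive_impl`. A hedged sketch of such a consumer, mirroring the mock-network and tests.rs hunks elsewhere in this diff (the runtime items named here are illustrative):

    #[derive_impl(pallet_contracts::config_preludes::TestDefaultConfig)]
    impl pallet_contracts::Config for Runtime {
        // `no_default` types must always be provided by the runtime.
        type Currency = Balances;
        type Schedule = Schedule;
        type CallStack = [pallet_contracts::Frame<Self>; 5];
        // Defaults exist for these, but a real runtime will usually override them.
        type Time = Timestamp;
        type Xcm = pallet_xcm::Pallet<Self>;
    }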
+ #[pallet::no_default_bounds] type Xcm: xcm_builder::Controller< OriginFor, ::RuntimeCall, @@ -459,6 +479,95 @@ pub mod pallet { >; } + /// Container for different types that implement [`DefaultConfig`]` of this pallet. + pub mod config_preludes { + use super::*; + use frame_support::{ + derive_impl, + traits::{ConstBool, ConstU32}, + }; + use frame_system::EnsureSigned; + use sp_core::parameter_types; + + type AccountId = sp_runtime::AccountId32; + type Balance = u64; + const UNITS: Balance = 10_000_000_000; + const CENTS: Balance = UNITS / 100; + + const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS + } + + parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const MaxDelegateDependencies: u32 = 32; + } + + /// A type providing default configurations for this pallet in testing environment. + pub struct TestDefaultConfig; + + impl Randomness for TestDefaultConfig { + fn random(_subject: &[u8]) -> (Output, BlockNumber) { + unimplemented!("No default `random` implementation in `TestDefaultConfig`, provide a custom `T::Randomness` type.") + } + } + + impl Time for TestDefaultConfig { + type Moment = u64; + fn now() -> Self::Moment { + unimplemented!("No default `now` implementation in `TestDefaultConfig` provide a custom `T::Time` type.") + } + } + + impl> Convert for TestDefaultConfig { + fn convert(w: Weight) -> T { + w.ref_time().into() + } + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + + #[inject_runtime_type] + type RuntimeHoldReason = (); + + #[inject_runtime_type] + type RuntimeCall = (); + + type AddressGenerator = DefaultAddressGenerator; + type CallFilter = (); + type ChainExtension = (); + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type DefaultDepositLimit = DefaultDepositLimit; + type DepositPerByte = DepositPerByte; + type DepositPerItem = DepositPerItem; + type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxStorageKeyLen = ConstU32<128>; + type Migrations = (); + type Time = Self; + type Randomness = Self; + type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; + type WeightInfo = (); + type WeightPrice = Self; + type Debug = (); + type Environment = (); + type ApiVersion = (); + type Xcm = (); + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_idle(_block: BlockNumberFor, limit: Weight) -> Weight { diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index b0df45082856..c633ba9c2d50 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -64,6 +64,7 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; include!(concat!(env!("OUT_DIR"), "/migration_codegen.rs")); use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET}; diff --git 
a/substrate/frame/contracts/src/migration/v16.rs b/substrate/frame/contracts/src/migration/v16.rs new file mode 100644 index 000000000000..74fbc997718d --- /dev/null +++ b/substrate/frame/contracts/src/migration/v16.rs @@ -0,0 +1,107 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Remove ED from storage base deposit. +//! See . + +use crate::{ + migration::{IsFinished, MigrationStep}, + weights::WeightInfo, + BalanceOf, CodeHash, Config, Pallet, TrieId, Weight, WeightMeter, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; +use sp_runtime::{BoundedBTreeMap, Saturating}; +use sp_std::prelude::*; + +#[cfg(feature = "runtime-benchmarks")] +pub fn store_old_contract_info( + account: T::AccountId, + info: &crate::ContractInfo, +) -> BalanceOf { + let storage_base_deposit = Pallet::::min_balance() + 1u32.into(); + ContractInfoOf::::insert( + account, + ContractInfo { + trie_id: info.trie_id.clone(), + code_hash: info.code_hash, + storage_bytes: Default::default(), + storage_items: Default::default(), + storage_byte_deposit: Default::default(), + storage_item_deposit: Default::default(), + storage_base_deposit, + delegate_dependencies: Default::default(), + }, + ); + + storage_base_deposit +} + +#[storage_alias] +pub type ContractInfoOf = + StorageMap, Twox64Concat, ::AccountId, ContractInfo>; + +#[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T))] +pub struct ContractInfo { + trie_id: TrieId, + code_hash: CodeHash, + storage_bytes: u32, + storage_items: u32, + storage_byte_deposit: BalanceOf, + storage_item_deposit: BalanceOf, + pub storage_base_deposit: BalanceOf, + delegate_dependencies: BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies>, +} + +#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] +pub struct Migration { + last_account: Option, +} + +impl MigrationStep for Migration { + const VERSION: u16 = 16; + + fn max_step_weight() -> Weight { + T::WeightInfo::v16_migration_step() + } + + fn step(&mut self, meter: &mut WeightMeter) -> IsFinished { + let mut iter = if let Some(last_account) = self.last_account.take() { + ContractInfoOf::::iter_keys_from(ContractInfoOf::::hashed_key_for(last_account)) + } else { + ContractInfoOf::::iter_keys() + }; + + if let Some(key) = iter.next() { + log::debug!(target: LOG_TARGET, "Migrating contract {:?}", key); + ContractInfoOf::::mutate(key.clone(), |info| { + let ed = Pallet::::min_balance(); + let mut updated_info = info.take().expect("Item exists; qed"); + updated_info.storage_base_deposit.saturating_reduce(ed); + *info = Some(updated_info); + }); + self.last_account = Some(key); + meter.consume(T::WeightInfo::v16_migration_step()); + IsFinished::No + } else { + log::debug!(target: LOG_TARGET, "No more contracts to migrate"); + 
meter.consume(T::WeightInfo::v16_migration_step()); + IsFinished::Yes + } + } +} diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index fc6d514ebf4d..1e9739a1599e 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -23,7 +23,7 @@ use crate::{ exec::{AccountIdOf, Key}, weights::WeightInfo, BalanceOf, CodeHash, CodeInfo, Config, ContractInfoOf, DeletionQueue, DeletionQueueCounter, - Error, Pallet, TrieId, SENTINEL, + Error, TrieId, SENTINEL, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ @@ -125,9 +125,7 @@ impl ContractInfo { /// Same as [`Self::extra_deposit`] but including the base deposit. pub fn total_deposit(&self) -> BalanceOf { - self.extra_deposit() - .saturating_add(self.storage_base_deposit) - .saturating_sub(Pallet::::min_balance()) + self.extra_deposit().saturating_add(self.storage_base_deposit) } /// Returns the storage base deposit of the contract. @@ -213,7 +211,6 @@ impl ContractInfo { /// The base deposit is updated when the `code_hash` of the contract changes, as it depends on /// the deposit paid to upload the contract's code. pub fn update_base_deposit(&mut self, code_info: &CodeInfo) -> BalanceOf { - let ed = Pallet::::min_balance(); let info_deposit = Diff { bytes_added: self.encoded_size() as u32, items_added: 1, ..Default::default() } .update_contract::(None) @@ -224,11 +221,7 @@ impl ContractInfo { // to prevent abuse. let upload_deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); - // Instantiate needs to transfer at least the minimum balance in order to pull the - // contract's own account into existence, as the deposit itself does not contribute to the - // `ed`. - let deposit = info_deposit.saturating_add(upload_deposit).saturating_add(ed); - + let deposit = info_deposit.saturating_add(upload_deposit); self.storage_base_deposit = deposit; deposit } diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs index 495cbd90db5a..5db9a772ad82 100644 --- a/substrate/frame/contracts/src/storage/meter.rs +++ b/substrate/frame/contracts/src/storage/meter.rs @@ -435,22 +435,12 @@ where contract: &T::AccountId, contract_info: &mut ContractInfo, code_info: &CodeInfo, - ) -> Result, DispatchError> { + ) -> Result<(), DispatchError> { debug_assert!(matches!(self.contract_state(), ContractState::Alive)); - let ed = Pallet::::min_balance(); - - let deposit = contract_info.update_base_deposit(&code_info); - if deposit > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) - } - - let deposit = Deposit::Charge(deposit); - - // We do not increase `own_contribution` because this will be charged later when the - // contract execution does conclude and hence would lead to a double charge. - self.total_deposit = Deposit::Charge(ed); // We need to make sure that the contract's account exists. + let ed = Pallet::::min_balance(); + self.total_deposit = Deposit::Charge(ed); T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; // A consumer is added at account creation and removed it on termination, otherwise the @@ -458,9 +448,11 @@ where // With the consumer, a correct runtime cannot remove the account. 
System::::inc_consumers(contract)?; - self.charge_deposit(contract.clone(), deposit.saturating_sub(&Deposit::Charge(ed))); + let deposit = contract_info.update_base_deposit(&code_info); + let deposit = Deposit::Charge(deposit); - Ok(deposit) + self.charge_deposit(contract.clone(), deposit); + Ok(()) } /// Call to tell the meter that the currently executing contract was terminated. @@ -859,14 +851,14 @@ mod tests { let test_cases = vec![ ChargingTestCase { origin: Origin::::from_account_id(ALICE), - deposit: Deposit::Refund(107), + deposit: Deposit::Refund(108), expected: TestExt { limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], charges: vec![ Charge { origin: ALICE, contract: CHARLIE, - amount: Deposit::Refund(119), + amount: Deposit::Refund(120), state: ContractState::Terminated { beneficiary: CHARLIE }, }, Charge { @@ -915,7 +907,6 @@ mod tests { meter.absorb(nested0, &BOB, None); assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); - assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index d7ec710e768e..57b804a51e41 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -34,7 +34,7 @@ use crate::{ primitives::CodeUploadReturnValue, storage::DeletionQueueManager, tests::test_utils::{get_contract, get_contract_checked}, - wasm::{Determinism, ReturnErrorCode as RuntimeReturnCode}, + wasm::{Determinism, LoadingMode, ReturnErrorCode as RuntimeReturnCode}, weights::WeightInfo, Array, BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, @@ -334,34 +334,24 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { + type Block = Block; type AccountId = AccountId32; type Lookup = IdentityLookup; - type Block = Block; type AccountData = pallet_balances::AccountData; } + impl pallet_insecure_randomness_collective_flip::Config for Test {} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; + type ReserveIdentifier = [u8; 8]; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } -impl pallet_timestamp::Config for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = ConstU64<1>; - type WeightInfo = (); -} +#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)] +impl pallet_timestamp::Config for Test {} + impl pallet_utility::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -467,16 +457,13 @@ parameter_types! 
{ pub static UnstableInterface: bool = true; } +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; type CallFilter = TestFilter; type CallStack = [Frame; 5]; - type WeightPrice = Self; - type WeightInfo = (); type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type Schedule = MySchedule; @@ -484,20 +471,13 @@ impl Config for Test { type DepositPerItem = DepositPerItem; type DefaultDepositLimit = DefaultDepositLimit; type AddressGenerator = DefaultAddressGenerator; - type MaxCodeLen = ConstU32<{ 123 * 1024 }>; - type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; type UploadOrigin = EnsureAccount; type InstantiateOrigin = EnsureAccount; - type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; - type RuntimeHoldReason = RuntimeHoldReason; type Migrations = crate::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; - type Environment = (); - type ApiVersion = (); - type Xcm = (); } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); @@ -1102,6 +1082,20 @@ fn delegate_call() { }); } +#[test] +fn track_check_uncheck_module_call() { + let (wasm, code_hash) = compile_module::("dummy").unwrap(); + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + Contracts::bare_upload_code(ALICE, wasm, None, Determinism::Enforced).unwrap(); + builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_result(); + }); + + // It should have recorded 1 `Checked` for the upload and 1 `Unchecked` for the instantiate. + let record = crate::wasm::tracker::LOADED_MODULE.with(|stack| stack.borrow().clone()); + assert_eq!(record, vec![LoadingMode::Checked, LoadingMode::Unchecked]); +} + #[test] fn transfer_expendable_cannot_kill_account() { let (wasm, _code_hash) = compile_module::("dummy").unwrap(); @@ -3803,7 +3797,7 @@ fn locking_delegate_dependency_works() { &HoldReason::StorageDepositReserve.into(), &addr_caller ), - dependency_deposit + contract.storage_base_deposit() - ED + dependency_deposit + contract.storage_base_deposit() ); // Removing the code should fail, since we have added a dependency. @@ -3842,7 +3836,7 @@ fn locking_delegate_dependency_works() { &HoldReason::StorageDepositReserve.into(), &addr_caller ), - contract.storage_base_deposit() - ED + contract.storage_base_deposit() ); // Removing a nonexistent dependency should fail. @@ -3874,7 +3868,7 @@ fn locking_delegate_dependency_works() { assert_ok!(call(&addr_caller, &terminate_input).result); assert_eq!( test_utils::get_balance(&ALICE), - balance_before + contract.storage_base_deposit() + dependency_deposit + ED + balance_before + contract.storage_base_deposit() + dependency_deposit ); // Terminate should also remove the dependency, so we can remove the code. @@ -3890,13 +3884,9 @@ fn native_dependency_deposit_works() { // Set hash lock up deposit to 30%, to test deposit calculation. CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); - // Set a low existential deposit so that the base storage deposit is based on the contract - // storage deposit rather than the existential deposit. 
- const ED: u64 = 10; - // Test with both existing and uploaded code for code in [Code::Upload(wasm.clone()), Code::Existing(code_hash)] { - ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + ExtBuilder::default().build().execute_with(|| { let _ = Balances::set_balance(&ALICE, 1_000_000); let lockup_deposit_percent = CodeHashLockupDepositPercent::get(); @@ -3928,16 +3918,16 @@ fn native_dependency_deposit_works() { let res = builder::bare_instantiate(code).build(); let addr = res.result.unwrap().account_id; - let base_deposit = ED + test_utils::contract_info_storage_deposit(&addr); + let base_deposit = test_utils::contract_info_storage_deposit(&addr); let upload_deposit = test_utils::get_code_deposit(&code_hash); let extra_deposit = add_upload_deposit.then(|| upload_deposit).unwrap_or_default(); // Check initial storage_deposit - // The base deposit should be: ED + contract_info_storage_deposit + 30% * deposit + // The base deposit should be: contract_info_storage_deposit + 30% * deposit let deposit = extra_deposit + base_deposit + lockup_deposit_percent.mul_ceil(upload_deposit); - assert_eq!(res.storage_deposit.charge_or_zero(), deposit); + assert_eq!(res.storage_deposit.charge_or_zero(), deposit + Contracts::min_balance()); // call set_code_hash builder::bare_call(addr.clone()) @@ -3948,9 +3938,10 @@ fn native_dependency_deposit_works() { let code_deposit = test_utils::get_code_deposit(&dummy_code_hash); let deposit = base_deposit + lockup_deposit_percent.mul_ceil(code_deposit); assert_eq!(test_utils::get_contract(&addr).storage_base_deposit(), deposit); + assert_eq!( test_utils::get_balance_on_hold(&HoldReason::StorageDepositReserve.into(), &addr), - deposit - ED + deposit ); }); } diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index cae0b7c40206..8d7f928dba33 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -21,26 +21,26 @@ mod prepare; mod runtime; -#[cfg(test)] -pub use runtime::STABLE_API_COUNT; - #[cfg(doc)] pub use crate::wasm::runtime::api_doc; -pub use crate::wasm::runtime::{ - AllowDeprecatedInterface, AllowUnstableInterface, Environment, Runtime, RuntimeCosts, -}; - #[cfg(test)] -pub use tests::MockExt; +pub use { + crate::wasm::{prepare::tracker, runtime::ReturnErrorCode}, + runtime::STABLE_API_COUNT, + tests::MockExt, +}; -#[cfg(test)] -pub use crate::wasm::runtime::ReturnErrorCode; +pub use crate::wasm::{ + prepare::{LoadedModule, LoadingMode}, + runtime::{ + AllowDeprecatedInterface, AllowUnstableInterface, Environment, Runtime, RuntimeCosts, + }, +}; use crate::{ exec::{ExecResult, Executable, ExportedFunction, Ext}, gas::{GasMeter, Token}, - wasm::prepare::LoadedModule, weights::WeightInfo, AccountIdOf, BadOrigin, BalanceOf, CodeHash, CodeInfoOf, CodeVec, Config, Error, Event, HoldReason, Pallet, PristineCode, Schedule, Weight, LOG_TARGET, @@ -201,17 +201,14 @@ impl WasmBlob { /// When validating we pass `()` as `host_state`. Please note that such a dummy instance must /// **never** be called/executed, since it will panic the executor. 
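With the existential deposit no longer folded into `storage_base_deposit`, the arithmetic these tests assert becomes: the amount held under `StorageDepositReserve` equals the base deposit itself, while the charge reported by instantiation additionally carries the ED transferred to create the contract account. A worked example with made-up numbers (all values are illustrative, not the test constants):

fn main() {
    // Illustrative values only; the real amounts come from the runtime configuration.
    let ed: u64 = 100;                    // existential deposit
    let contract_info_deposit: u64 = 356; // deposit for the encoded `ContractInfo`
    let code_upload_deposit: u64 = 1_000; // full deposit held for the uploaded code
    let lockup_percent: u64 = 30;         // CodeHashLockupDepositPercent = 30%

    // `mul_ceil` analogue: the contract locks up 30% of the code deposit.
    let code_share = (code_upload_deposit * lockup_percent).div_ceil(100);

    // New rule: the base deposit no longer bakes in the ED.
    let storage_base_deposit = contract_info_deposit + code_share;

    // The amount put on hold for storage deposits is exactly the base deposit ...
    let held = storage_base_deposit;
    // ... while the instantiation result additionally reports the ED transfer.
    let reported_charge = storage_base_deposit + ed;

    assert_eq!(storage_base_deposit, 356 + 300);
    assert_eq!(held, 656);
    assert_eq!(reported_charge, 756);
}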
pub fn instantiate( - code: &[u8], + contract: LoadedModule, host_state: H, schedule: &Schedule, - determinism: Determinism, - stack_limits: StackLimits, allow_deprecated: AllowDeprecatedInterface, ) -> Result<(Store, Memory, InstancePre), &'static str> where E: Environment, { - let contract = LoadedModule::new::(&code, determinism, Some(stack_limits))?; let mut store = Store::new(&contract.engine, host_state); let mut linker = Linker::new(&contract.engine); E::define( @@ -338,33 +335,66 @@ impl CodeInfo { } } -impl Executable for WasmBlob { - fn from_storage( - code_hash: CodeHash, - gas_meter: &mut GasMeter, - ) -> Result { - let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; - gas_meter.charge(CodeLoadToken(code_info.code_len))?; - let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; - Ok(Self { code, code_info, code_hash }) +use crate::{ExecError, ExecReturnValue}; +use wasmi::Func; +enum InstanceOrExecReturn<'a, E: Ext> { + Instance((Func, Store>)), + ExecReturn(ExecReturnValue), +} + +type PreExecResult<'a, E> = Result, ExecError>; + +impl WasmBlob { + /// Sync the frame's gas meter with the engine's one. + pub fn process_result>( + mut store: Store>, + result: Result<(), wasmi::Error>, + ) -> ExecResult { + let engine_consumed_total = store.fuel_consumed().expect("Fuel metering is enabled; qed"); + let gas_meter = store.data_mut().ext().gas_meter_mut(); + let _ = gas_meter.sync_from_executor(engine_consumed_total)?; + store.into_data().to_execution_result(result) } - fn execute>( + #[cfg(feature = "runtime-benchmarks")] + pub fn bench_prepare_call>( self, ext: &mut E, - function: &ExportedFunction, input_data: Vec, - ) -> ExecResult { + ) -> (Func, Store>) { + use InstanceOrExecReturn::*; + match Self::prepare_execute(self, Runtime::new(ext, input_data), &ExportedFunction::Call) + .expect("Benchmark should provide valid module") + { + Instance((func, store)) => (func, store), + ExecReturn(_) => panic!("Expected Instance"), + } + } + + fn prepare_execute<'a, E: Ext>( + self, + runtime: Runtime<'a, E>, + function: &'a ExportedFunction, + ) -> PreExecResult<'a, E> { let code = self.code.as_slice(); // Instantiate the Wasm module to the engine. - let runtime = Runtime::new(ext, input_data); let schedule = ::Schedule::get(); + + let contract = LoadedModule::new::( + &code, + self.code_info.determinism, + Some(StackLimits::default()), + LoadingMode::Unchecked, + ) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "failed to create wasmi module: {err:?}"); + Error::::CodeRejected + })?; + let (mut store, memory, instance) = Self::instantiate::( - code, + contract, runtime, &schedule, - self.code_info.determinism, - StackLimits::default(), match function { ExportedFunction::Call => AllowDeprecatedInterface::Yes, ExportedFunction::Constructor => AllowDeprecatedInterface::No, @@ -390,15 +420,6 @@ impl Executable for WasmBlob { .add_fuel(fuel_limit) .expect("We've set up engine to fuel consuming mode; qed"); - // Sync this frame's gas meter with the engine's one. - let process_result = |mut store: Store>, result| { - let engine_consumed_total = - store.fuel_consumed().expect("Fuel metering is enabled; qed"); - let gas_meter = store.data_mut().ext().gas_meter_mut(); - let _ = gas_meter.sync_from_executor(engine_consumed_total)?; - store.into_data().to_execution_result(result) - }; - // Start function should already see the correct refcount in case it will be ever inspected. 
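`process_result` above centralises what the old inline closure did: read the total fuel the wasmi engine consumed and fold it back into the frame's gas meter before converting the outcome into an `ExecResult`. A stripped-down sketch of that sync step (the types are simplified placeholders; the real `sync_from_executor` also accounts for fuel that was already synced):

struct GasMeter { gas_left: u64 }

impl GasMeter {
    /// Subtract what the executor reports as burned; fail if it exceeds the budget.
    fn sync_from_executor(&mut self, engine_consumed_total: u64) -> Result<u64, &'static str> {
        self.gas_left = self.gas_left.checked_sub(engine_consumed_total).ok_or("OutOfGas")?;
        Ok(self.gas_left)
    }
}

fn main() {
    let mut meter = GasMeter { gas_left: 1_000 };
    // Pretend the wasm engine reports 400 units of consumed fuel after execution.
    let left = meter.sync_from_executor(400).expect("within budget");
    assert_eq!(left, 600);
    // Consuming more than the remaining budget surfaces as an out-of-gas error.
    assert!(GasMeter { gas_left: 100 }.sync_from_executor(400).is_err());
}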
if let &ExportedFunction::Constructor = function { E::increment_refcount(self.code_hash)?; @@ -417,10 +438,37 @@ impl Executable for WasmBlob { Error::::CodeRejected })?; - let result = exported_func.call(&mut store, &[], &mut []); - process_result(store, result) + Ok(InstanceOrExecReturn::Instance((exported_func, store))) }, - Err(err) => process_result(store, Err(err)), + Err(err) => Self::process_result(store, Err(err)).map(InstanceOrExecReturn::ExecReturn), + } + } +} + +impl Executable for WasmBlob { + fn from_storage( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result { + let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; + gas_meter.charge(CodeLoadToken(code_info.code_len))?; + let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; + Ok(Self { code, code_info, code_hash }) + } + + fn execute>( + self, + ext: &mut E, + function: &ExportedFunction, + input_data: Vec, + ) -> ExecResult { + use InstanceOrExecReturn::*; + match Self::prepare_execute(self, Runtime::new(ext, input_data), function)? { + Instance((func, mut store)) => { + let result = func.call(&mut store, &[], &mut []); + Self::process_result(store, result) + }, + ExecReturn(exec_return) => Ok(exec_return), } } @@ -1780,6 +1828,7 @@ mod tests { const CODE_GAS_LEFT: &str = r#" (module (import "seal1" "gas_left" (func $seal_gas_left (param i32 i32))) + (import "seal0" "clear_storage" (func $clear_storage (param i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1796,6 +1845,9 @@ mod tests { ) (func (export "call") + ;; Burn some PoV: clear_storage consumes some PoV since in order to clear the storage we need to read its size first. + (call $clear_storage (i32.const 0)) + ;; This stores the weight left to the buffer (call $seal_gas_left (i32.const 0) (i32.const 20)) @@ -1807,6 +1859,9 @@ mod tests { ) ) + ;; Burn some PoV: clear_storage consumes some PoV since in order to clear the storage we need to read its size first. + (call $clear_storage (i32.const 0)) + ;; Return weight left and its encoded value len (call $seal_return (i32.const 0) (i32.const 0) (i32.load (i32.const 20))) diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 5efea8c3a23b..0d9a12d8ae83 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -48,6 +48,20 @@ pub struct LoadedModule { pub engine: Engine, } +#[derive(PartialEq, Debug, Clone)] +pub enum LoadingMode { + Checked, + Unchecked, +} + +#[cfg(test)] +pub mod tracker { + use sp_std::cell::RefCell; + thread_local! { + pub static LOADED_MODULE: RefCell> = RefCell::new(Vec::new()); + } +} + impl LoadedModule { /// Creates a new instance of `LoadedModule`. /// @@ -57,6 +71,7 @@ impl LoadedModule { code: &[u8], determinism: Determinism, stack_limits: Option, + _mode: LoadingMode, ) -> Result { // NOTE: wasmi does not support unstable WebAssembly features. The module is implicitly // checked for not having those ones when creating `wasmi::Module` below. @@ -79,11 +94,16 @@ impl LoadedModule { } let engine = Engine::new(&config); + + // TODO use Module::new_unchecked when validate_module is true once we are on wasmi@0.32 let module = Module::new(&engine, code).map_err(|err| { log::debug!(target: LOG_TARGET, "Module creation failed: {:?}", err); "Can't load the module into wasmi!"
})?; + #[cfg(test)] + tracker::LOADED_MODULE.with(|t| t.borrow_mut().push(_mode)); + // Return a `LoadedModule` instance with // __valid__ module. Ok(LoadedModule { module, engine }) @@ -229,24 +249,38 @@ where E: Environment<()>, T: Config, { - (|| { + let module = (|| { + // We don't actually ever execute this instance so we can get away with a minimal stack + // which reduces the amount of memory that needs to be zeroed. + let stack_limits = Some(StackLimits::new(1, 1, 0).expect("initial <= max; qed")); + // We check that the module is generally valid, // and does not have restricted WebAssembly features, here. let contract_module = match *determinism { Determinism::Relaxed => - if let Ok(module) = LoadedModule::new::(code, Determinism::Enforced, None) { + if let Ok(module) = LoadedModule::new::( + code, + Determinism::Enforced, + stack_limits, + LoadingMode::Checked, + ) { *determinism = Determinism::Enforced; module } else { - LoadedModule::new::(code, Determinism::Relaxed, None)? + LoadedModule::new::(code, Determinism::Relaxed, None, LoadingMode::Checked)? }, - Determinism::Enforced => LoadedModule::new::(code, Determinism::Enforced, None)?, + Determinism::Enforced => LoadedModule::new::( + code, + Determinism::Enforced, + stack_limits, + LoadingMode::Checked, + )?, }; // The we check that module satisfies constraints the pallet puts on contracts. contract_module.scan_exports()?; contract_module.scan_imports::(schedule)?; - Ok(()) + Ok(contract_module) })() .map_err(|msg: &str| { log::debug!(target: LOG_TARGET, "New code rejected on validation: {}", msg); @@ -257,22 +291,11 @@ where // // - It doesn't use any unknown imports. // - It doesn't explode the wasmi bytecode generation. - // - // We don't actually ever execute this instance so we can get away with a minimal stack which - // reduces the amount of memory that needs to be zeroed. - let stack_limits = StackLimits::new(1, 1, 0).expect("initial <= max; qed"); - WasmBlob::::instantiate::( - &code, - (), - schedule, - *determinism, - stack_limits, - AllowDeprecatedInterface::No, - ) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "{}", err); - (Error::::CodeRejected.into(), "New code rejected on wasmi instantiation!") - })?; + WasmBlob::::instantiate::(module, (), schedule, AllowDeprecatedInterface::No) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "{err}"); + (Error::::CodeRejected.into(), "New code rejected on wasmi instantiation!") + })?; Ok(()) } @@ -325,7 +348,8 @@ pub mod benchmarking { owner: AccountIdOf, ) -> Result, DispatchError> { let determinism = Determinism::Enforced; - let contract_module = LoadedModule::new::(&code, determinism, None)?; + let contract_module = + LoadedModule::new::(&code, determinism, None, LoadingMode::Checked)?; let _ = contract_module.scan_imports::(schedule)?; let code: CodeVec = code.try_into().map_err(|_| >::CodeTooLarge)?; let code_info = CodeInfo { diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 5d067365e171..b95b1d1a9a2e 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! 
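The `LoadingMode` added in `prepare.rs` above distinguishes the fully validated load at upload time (`Checked`) from the fast path taken when executing already-validated code (`Unchecked`), and the test-only `tracker` module records which path ran so `track_check_uncheck_module_call` can assert on it. A standalone sketch of that thread-local recording pattern (std-only, simplified):

use std::cell::RefCell;

#[derive(Clone, Debug, PartialEq)]
enum LoadingMode { Checked, Unchecked }

thread_local! {
    static LOADED_MODULE: RefCell<Vec<LoadingMode>> = RefCell::new(Vec::new());
}

fn load_module(mode: LoadingMode) {
    // Real code would build the wasmi module here; we only record how it was loaded.
    LOADED_MODULE.with(|log| log.borrow_mut().push(mode));
}

fn main() {
    load_module(LoadingMode::Checked);   // e.g. code upload: full validation
    load_module(LoadingMode::Unchecked); // e.g. contract call: code was validated at upload
    let record = LOADED_MODULE.with(|log| log.borrow().clone());
    assert_eq!(record, vec![LoadingMode::Checked, LoadingMode::Unchecked]);
}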
HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_contracts -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/contracts/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_contracts +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/contracts/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -60,6 +58,7 @@ pub trait WeightInfo { fn v13_migration_step() -> Weight; fn v14_migration_step() -> Weight; fn v15_migration_step() -> Weight; + fn v16_migration_step() -> Weight; fn migration_noop() -> Weight; fn migrate() -> Weight; fn on_runtime_upgrade_noop() -> Weight; @@ -144,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_140_000 picoseconds. - Weight::from_parts(2_243_000, 1627) + // Minimum execution time: 2_149_000 picoseconds. + Weight::from_parts(2_274_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -155,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_655_000 picoseconds. - Weight::from_parts(12_863_000, 442) - // Standard Error: 1_147 - .saturating_add(Weight::from_parts(1_097_546, 0).saturating_mul(k.into())) + // Minimum execution time: 12_863_000 picoseconds. + Weight::from_parts(13_188_000, 442) + // Standard Error: 1_053 + .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -172,10 +171,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_626_000 picoseconds. - Weight::from_parts(8_805_071, 6149) + // Minimum execution time: 8_432_000 picoseconds. + Weight::from_parts(9_203_290, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_188, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -188,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_748_000 picoseconds. - Weight::from_parts(17_483_000, 6450) + // Minimum execution time: 17_177_000 picoseconds. 
+ Weight::from_parts(17_663_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -202,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_632_000 picoseconds. - Weight::from_parts(3_706_000, 3635) - // Standard Error: 903 - .saturating_add(Weight::from_parts(1_185_348, 0).saturating_mul(k.into())) + // Minimum execution time: 3_636_000 picoseconds. + Weight::from_parts(3_774_000, 3635) + // Standard Error: 542 + .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -226,10 +225,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (1 ±0)` // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 21_630_000 picoseconds. - Weight::from_parts(20_845_294, 6266) - // Standard Error: 2 - .saturating_add(Weight::from_parts(421, 0).saturating_mul(c.into())) + // Minimum execution time: 21_585_000 picoseconds. + Weight::from_parts(22_069_944, 6266) + // Standard Error: 1 + .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -240,8 +239,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 13_178_000 picoseconds. - Weight::from_parts(13_671_000, 6380) + // Minimum execution time: 13_283_000 picoseconds. + Weight::from_parts(14_015_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -255,8 +254,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 45_828_000 picoseconds. - Weight::from_parts(46_476_000, 6292) + // Minimum execution time: 48_022_000 picoseconds. + Weight::from_parts(49_627_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -268,19 +267,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 54_767_000 picoseconds. - Weight::from_parts(56_879_000, 6534) + // Minimum execution time: 58_374_000 picoseconds. + Weight::from_parts(59_615_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + /// Storage: `Contracts::ContractInfoOf` (r:2 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + fn v16_migration_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `409` + // Estimated: `6349` + // Minimum execution time: 12_559_000 picoseconds. 
+ Weight::from_parts(12_947_000, 6349) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `Contracts::MigrationInProgress` (r:1 w:1) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) fn migration_noop() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_599_000 picoseconds. - Weight::from_parts(2_794_000, 1627) + // Minimum execution time: 2_480_000 picoseconds. + Weight::from_parts(2_680_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -292,8 +302,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_275_000 picoseconds. - Weight::from_parts(10_709_000, 3631) + // Minimum execution time: 12_625_000 picoseconds. + Weight::from_parts(13_094_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -303,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 3_813_000 picoseconds. - Weight::from_parts(4_033_000, 3607) + // Minimum execution time: 4_836_000 picoseconds. + Weight::from_parts(5_182_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -315,8 +325,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_160_000 picoseconds. - Weight::from_parts(5_478_000, 3632) + // Minimum execution time: 6_319_000 picoseconds. + Weight::from_parts(6_582_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -327,8 +337,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_178_000 picoseconds. - Weight::from_parts(5_540_000, 3607) + // Minimum execution time: 6_532_000 picoseconds. + Weight::from_parts(6_909_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -353,10 +363,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804 + c * (1 ±0)` // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 295_545_000 picoseconds. - Weight::from_parts(291_859_570, 9217) - // Standard Error: 73 - .saturating_add(Weight::from_parts(33_546, 0).saturating_mul(c.into())) + // Minimum execution time: 305_778_000 picoseconds. + Weight::from_parts(282_321_249, 9217) + // Standard Error: 72 + .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -388,14 +398,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_796_729_000 picoseconds. 
- Weight::from_parts(808_328_941, 8740) - // Standard Error: 156 - .saturating_add(Weight::from_parts(99_863, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_433, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_501, 0).saturating_mul(s.into())) + // Minimum execution time: 3_810_809_000 picoseconds. + Weight::from_parts(739_511_598, 8740) + // Standard Error: 140 + .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -425,12 +435,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 2_004_502_000 picoseconds. - Weight::from_parts(2_013_544_000, 8982) + // Minimum execution time: 1_986_789_000 picoseconds. + Weight::from_parts(2_017_466_000, 8982) // Standard Error: 26 - .saturating_add(Weight::from_parts(807, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) // Standard Error: 26 - .saturating_add(Weight::from_parts(784, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -454,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 213_568_000 picoseconds. - Weight::from_parts(219_796_000, 9244) + // Minimum execution time: 210_724_000 picoseconds. + Weight::from_parts(218_608_000, 9244) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -476,10 +486,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 290_835_000 picoseconds. - Weight::from_parts(396_613_751, 6085) - // Standard Error: 160 - .saturating_add(Weight::from_parts(63_993, 0).saturating_mul(c.into())) + // Minimum execution time: 271_259_000 picoseconds. + Weight::from_parts(298_852_854, 6085) + // Standard Error: 65 + .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -500,10 +510,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 302_249_000 picoseconds. - Weight::from_parts(312_519_255, 6085) - // Standard Error: 90 - .saturating_add(Weight::from_parts(64_908, 0).saturating_mul(c.into())) + // Minimum execution time: 278_167_000 picoseconds. + Weight::from_parts(311_888_941, 6085) + // Standard Error: 58 + .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -521,8 +531,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 46_580_000 picoseconds. - Weight::from_parts(47_676_000, 3780) + // Minimum execution time: 47_403_000 picoseconds. 
+ Weight::from_parts(48_707_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -538,271 +548,174 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 35_454_000 picoseconds. - Weight::from_parts(36_956_000, 8967) + // Minimum execution time: 35_361_000 picoseconds. + Weight::from_parts(36_714_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869 + r * (6 ±0)` - // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 265_797_000 picoseconds. - Weight::from_parts(305_958_124, 9284) - // Standard Error: 3_049 - .saturating_add(Weight::from_parts(343_670, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_340_000 picoseconds. 
+ Weight::from_parts(9_360_237, 0) + // Standard Error: 269 + .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `925 + r * (209 ±0)` - // Estimated: `9304 + r * (2684 ±0)` - // Minimum execution time: 273_327_000 picoseconds. - Weight::from_parts(143_036_031, 9304) - // Standard Error: 5_612 - .saturating_add(Weight::from_parts(3_745_297, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `509 + r * (77 ±0)` + // Estimated: `1467 + r * (2552 ±0)` + // Minimum execution time: 9_059_000 picoseconds. 
+ Weight::from_parts(9_201_000, 1467) + // Standard Error: 5_643 + .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) + /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (213 ±0)` - // Estimated: `9308 + r * (2688 ±0)` - // Minimum execution time: 279_590_000 picoseconds. - Weight::from_parts(147_961_398, 9308) - // Standard Error: 6_198 - .saturating_add(Weight::from_parts(4_677_881, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `517 + r * (170 ±0)` + // Estimated: `1468 + r * (2645 ±0)` + // Minimum execution time: 9_220_000 picoseconds. 
+ Weight::from_parts(9_399_000, 1468) + // Standard Error: 6_194 + .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `876 + r * (6 ±0)` - // Estimated: `9293 + r * (6 ±0)` - // Minimum execution time: 277_334_000 picoseconds. - Weight::from_parts(285_263_644, 9293) - // Standard Error: 771 - .saturating_add(Weight::from_parts(450_538, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_707_000 picoseconds. 
+ Weight::from_parts(10_100_456, 0) + // Standard Error: 234 + .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (3 ±0)` - // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 262_895_000 picoseconds. - Weight::from_parts(290_274_813, 9282) - // Standard Error: 356 - .saturating_add(Weight::from_parts(172_585, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_524_000 picoseconds. + Weight::from_parts(10_813_389, 0) + // Standard Error: 76 + .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. 
fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `756 + r * (3 ±0)` - // Estimated: `9171 + r * (3 ±0)` - // Minimum execution time: 261_701_000 picoseconds. - Weight::from_parts(279_765_708, 9171) - // Standard Error: 451 - .saturating_add(Weight::from_parts(158_243, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_799_000 picoseconds. + Weight::from_parts(10_886_744, 0) + // Standard Error: 75 + .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870 + r * (6 ±0)` - // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 284_209_000 picoseconds. - Weight::from_parts(294_215_782, 9285) - // Standard Error: 703 - .saturating_add(Weight::from_parts(344_236, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_895_000 picoseconds. 
+ Weight::from_parts(10_658_338, 0) + // Standard Error: 189 + .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 277_126_000 picoseconds. - Weight::from_parts(292_436_333, 9284) - // Standard Error: 1_215 - .saturating_add(Weight::from_parts(380_107, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_643_000 picoseconds. 
+ Weight::from_parts(10_932_126, 0) + // Standard Error: 153 + .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:0) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1010 + r * (6 ±0)` - // Estimated: `9409 + r * (6 ±0)` - // Minimum execution time: 266_377_000 picoseconds. - Weight::from_parts(295_163_193, 9409) - // Standard Error: 4_026 - .saturating_add(Weight::from_parts(1_859_387, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `140` + // Estimated: `3599` + // Minimum execution time: 9_548_000 picoseconds. + Weight::from_parts(9_737_000, 3599) + // Standard Error: 971 + .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// The range of component `r` is `[0, 1600]`. + fn seal_value_transferred(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_172_000 picoseconds. + Weight::from_parts(18_255_933, 0) + // Standard Error: 540 + .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) + } + /// The range of component `r` is `[0, 1600]`. + fn seal_minimum_balance(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_232_000 picoseconds. + Weight::from_parts(9_796_584, 0) + // Standard Error: 208 + .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) + } + /// The range of component `r` is `[0, 1600]`. + fn seal_block_number(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_747_000 picoseconds. + Weight::from_parts(8_733_230, 0) + // Standard Error: 377 + .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) + } + /// The range of component `r` is `[0, 1600]`. 
+ fn seal_now(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_214_000 picoseconds. + Weight::from_parts(10_194_153, 0) + // Standard Error: 516 + .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// The range of component `r` is `[0, 1600]`. + fn seal_weight_to_fee(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 9_022_000 picoseconds. + Weight::from_parts(22_051_160, 1552) + // Standard Error: 697 + .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// The range of component `r` is `[0, 1600]`. + fn seal_input(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_135_000 picoseconds. + Weight::from_parts(10_646_215, 0) + // Standard Error: 161 + .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -820,266 +733,55 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_value_transferred(r: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048576]`. + fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + r * (6 ±0)` - // Estimated: `9301 + r * (6 ±0)` - // Minimum execution time: 276_990_000 picoseconds. - Weight::from_parts(296_463_738, 9301) - // Standard Error: 655 - .saturating_add(Weight::from_parts(335_070, 0).saturating_mul(r.into())) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 273_896_000 picoseconds. + Weight::from_parts(148_309_654, 9287) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + } + /// The range of component `r` is `[0, 1]`. + fn seal_return(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_906_000 picoseconds. + Weight::from_parts(9_264_446, 0) + // Standard Error: 19_760 + .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) + } + /// The range of component `n` is `[0, 1048576]`. + fn seal_return_per_byte(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_266_000 picoseconds. 
+ Weight::from_parts(10_602_261, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Storage: `Parameters::Parameters` (r:1 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_minimum_balance(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `878 + r * (6 ±0)` - // Estimated: `9294 + r * (6 ±0)` - // Minimum execution time: 274_845_000 picoseconds. - Weight::from_parts(294_870_901, 9294) - // Standard Error: 793 - .saturating_add(Weight::from_parts(336_049, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. 
- fn seal_block_number(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `9297 + r * (6 ±0)` - // Minimum execution time: 277_792_000 picoseconds. - Weight::from_parts(290_529_469, 9297) - // Standard Error: 1_191 - .saturating_add(Weight::from_parts(339_291, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_now(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `9282 + r * (6 ±0)` - // Minimum execution time: 271_060_000 picoseconds. 
- Weight::from_parts(287_512_151, 9282) - // Standard Error: 859 - .saturating_add(Weight::from_parts(345_414, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_weight_to_fee(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `940 + r * (14 ±0)` - // Estimated: `9350 + r * (14 ±0)` - // Minimum execution time: 265_401_000 picoseconds. 
- Weight::from_parts(303_350_793, 9350) - // Standard Error: 1_995 - .saturating_add(Weight::from_parts(885_253, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_input(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `868 + r * (6 ±0)` - // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 264_474_000 picoseconds. - Weight::from_parts(289_972_634, 9285) - // Standard Error: 719 - .saturating_add(Weight::from_parts(269_856, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. 
- fn seal_input_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `9287` - // Minimum execution time: 266_661_000 picoseconds. - Weight::from_parts(147_923_867, 9287) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `856 + r * (45 ±0)` - // Estimated: `9271 + r * (45 ±0)` - // Minimum execution time: 257_851_000 picoseconds. - Weight::from_parts(285_687_679, 9271) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. 
- fn seal_return_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `866` - // Estimated: `9288` - // Minimum execution time: 283_478_000 picoseconds. - Weight::from_parts(290_827_773, 9288) - // Standard Error: 0 - .saturating_add(Weight::from_parts(319, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:1 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) + /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) + /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) @@ -1090,211 +792,119 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4805 + r * (2121 ±0)` // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 298_477_000 picoseconds. - Weight::from_parts(325_394_306, 13220) - // Standard Error: 873_781 - .saturating_add(Weight::from_parts(255_748_093, 0).saturating_mul(r.into())) + // Minimum execution time: 295_922_000 picoseconds. 
+ Weight::from_parts(322_472_877, 13220) + // Standard Error: 993_812 + .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((41_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `947 + r * (10 ±0)` - // Estimated: `9363 + r * (10 ±0)` - // Minimum execution time: 260_352_000 picoseconds. - Weight::from_parts(287_284_570, 9363) - // Standard Error: 4_051 - .saturating_add(Weight::from_parts(1_350_197, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 9_427_000 picoseconds. 
+ Weight::from_parts(12_996_213, 1561) + // Standard Error: 845 + .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (10 ±0)` - // Estimated: `9283 + r * (10 ±0)` - // Minimum execution time: 277_214_000 picoseconds. - Weight::from_parts(295_852_897, 9283) - // Standard Error: 1_554 - .saturating_add(Weight::from_parts(2_120_577, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_304_000 picoseconds. 
+ Weight::from_parts(25_678_842, 0) + // Standard Error: 1_855 + .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:6 w:6) + /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `883 + t * (32 ±0)` - // Estimated: `9303 + t * (2508 ±0)` - // Minimum execution time: 282_107_000 picoseconds. - Weight::from_parts(300_658_543, 9303) - // Standard Error: 97_515 - .saturating_add(Weight::from_parts(1_999_680, 0).saturating_mul(t.into())) - // Standard Error: 27 - .saturating_add(Weight::from_parts(417, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `0` + // Estimated: `990 + t * (2475 ±0)` + // Minimum execution time: 23_425_000 picoseconds. 
+ Weight::from_parts(15_229_010, 990) + // Standard Error: 14_380 + .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (7 ±0)` - // Estimated: `9285 + r * (7 ±0)` - // Minimum execution time: 170_105_000 picoseconds. - Weight::from_parts(185_260_479, 9285) - // Standard Error: 451 - .saturating_add(Weight::from_parts(227_483, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_117_000 picoseconds. 
+ Weight::from_parts(12_887_533, 0) + // Standard Error: 83 + .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125816` - // Estimated: `131758` - // Minimum execution time: 415_029_000 picoseconds. - Weight::from_parts(398_551_260, 131758) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_027, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_982_000 picoseconds. + Weight::from_parts(11_176_000, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `927 + r * (292 ±0)` - // Estimated: `929 + r * (293 ±0)` - // Minimum execution time: 279_920_000 picoseconds. - Weight::from_parts(190_991_719, 929) - // Standard Error: 10_104 - .saturating_add(Weight::from_parts(6_458_687, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_150_000 picoseconds. + Weight::from_parts(9_269_000, 105) + // Standard Error: 8_147 + .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1450` - // Estimated: `1433` - // Minimum execution time: 294_847_000 picoseconds. - Weight::from_parts(350_895_957, 1433) - // Standard Error: 80 - .saturating_add(Weight::from_parts(676, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(15_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Measured: `245` + // Estimated: `245` + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_007_323, 245) + // Standard Error: 3 + .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1256 + n * (1 ±0)` - // Estimated: `1256 + n * (1 ±0)` - // Minimum execution time: 280_398_000 picoseconds. - Weight::from_parts(306_915_984, 1256) - // Standard Error: 36 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 19_127_000 picoseconds. + Weight::from_parts(21_152_987, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1302,31 +912,29 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (288 ±0)` - // Estimated: `930 + r * (289 ±0)` - // Minimum execution time: 269_338_000 picoseconds. - Weight::from_parts(192_925_453, 930) - // Standard Error: 9_912 - .saturating_add(Weight::from_parts(6_299_237, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_264_000 picoseconds. + Weight::from_parts(9_449_000, 105) + // Standard Error: 8_196 + .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 281_305_000 picoseconds. 
- Weight::from_parts(305_828_464, 1252) - // Standard Error: 34 - .saturating_add(Weight::from_parts(490, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 18_489_000 picoseconds. + Weight::from_parts(19_916_153, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1334,30 +942,27 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (296 ±0)` - // Estimated: `926 + r * (297 ±0)` - // Minimum execution time: 267_838_000 picoseconds. - Weight::from_parts(224_154_959, 926) - // Standard Error: 7_904 - .saturating_add(Weight::from_parts(5_123_059, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_464_000, 105) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1268 + n * (1 ±0)` - // Estimated: `1268 + n * (1 ±0)` - // Minimum execution time: 280_093_000 picoseconds. - Weight::from_parts(304_698_374, 1268) - // Standard Error: 46 - .saturating_add(Weight::from_parts(579, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 17_981_000 picoseconds. + Weight::from_parts(19_802_353, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1365,30 +970,27 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `935 + r * (288 ±0)` - // Estimated: `932 + r * (289 ±0)` - // Minimum execution time: 263_759_000 picoseconds. - Weight::from_parts(214_010_246, 932) - // Standard Error: 8_052 - .saturating_add(Weight::from_parts(4_953_264, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_891_000 picoseconds. 
+ Weight::from_parts(10_046_000, 105) + // Standard Error: 6_993 + .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1255 + n * (1 ±0)` - // Estimated: `1255 + n * (1 ±0)` - // Minimum execution time: 273_652_000 picoseconds. - Weight::from_parts(299_141_902, 1255) - // Standard Error: 38 - .saturating_add(Weight::from_parts(337, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 17_229_000 picoseconds. + Weight::from_parts(18_302_733, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1396,670 +998,358 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (296 ±0)` - // Estimated: `922 + r * (297 ±0)` - // Minimum execution time: 273_392_000 picoseconds. - Weight::from_parts(192_725_781, 922) - // Standard Error: 10_264 - .saturating_add(Weight::from_parts(6_353_931, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_323_000 picoseconds. + Weight::from_parts(9_462_000, 105) + // Standard Error: 8_031 + .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1269 + n * (1 ±0)` - // Estimated: `1269 + n * (1 ±0)` - // Minimum execution time: 284_546_000 picoseconds. - Weight::from_parts(309_720_024, 1269) - // Standard Error: 33 - .saturating_add(Weight::from_parts(664, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 18_711_000 picoseconds. 
+ Weight::from_parts(20_495_670, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1602 w:1601) + /// Storage: `System::Account` (r:1601 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1418 + r * (45 ±0)` - // Estimated: `9785 + r * (2520 ±0)` - // Minimum execution time: 280_447_000 picoseconds. - Weight::from_parts(354_702_861, 9785) - // Standard Error: 42_509 - .saturating_add(Weight::from_parts(34_678_454, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `770` + // Estimated: `4221 + r * (2475 ±0)` + // Minimum execution time: 9_226_000 picoseconds. 
+ Weight::from_parts(9_394_000, 4221) + // Standard Error: 14_741 + .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:803 w:803) + /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1263 + r * (245 ±0)` - // Estimated: `9635 + r * (2721 ±0)` - // Minimum execution time: 279_400_000 picoseconds. - Weight::from_parts(282_198_000, 9635) - // Standard Error: 109_250 - .saturating_add(Weight::from_parts(246_481_216, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) + // Measured: `520 + r * (170 ±0)` + // Estimated: `6463 + r * (2646 ±0)` + // Minimum execution time: 9_455_000 picoseconds. 
+ Weight::from_parts(9_671_000, 6463) + // Standard Error: 126_080 + .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2721).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:735 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:736 w:0) + /// Storage: `Contracts::PristineCode` (r:735 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:737 w:737) + /// Storage: `System::EventTopics` (r:736 w:736) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (576 ±0)` - // Estimated: `9290 + r * (2637 ±3)` - // Minimum execution time: 276_509_000 picoseconds. - Weight::from_parts(281_555_000, 9290) - // Standard Error: 133_738 - .saturating_add(Weight::from_parts(244_671_777, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `0 + r * (527 ±0)` + // Estimated: `6447 + r * (2583 ±10)` + // Minimum execution time: 9_274_000 picoseconds. 
+ Weight::from_parts(9_437_000, 6447) + // Standard Error: 150_832 + .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2637).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1310 + t * (277 ±0)` - // Estimated: `12200 + t * (5227 ±0)` - // Minimum execution time: 464_343_000 picoseconds. - Weight::from_parts(485_002_000, 12200) - // Standard Error: 7_933_446 - .saturating_add(Weight::from_parts(172_853_968, 0).saturating_mul(t.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(775, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Measured: `699 + t * (277 ±0)` + // Estimated: `6639 + t * (3458 ±0)` + // Minimum execution time: 214_483_000 picoseconds. 
+ Weight::from_parts(122_634_366, 6639) + // Standard Error: 2_499_235 + .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 5227).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:802 w:802) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) + /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:801 w:0) + /// Storage: `Contracts::PristineCode` (r:800 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:803 w:803) + /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `System::Account` (r:802 w:802) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1281 + r * (255 ±0)` - // Estimated: `9623 + r * (2731 ±0)` - // Minimum execution time: 661_757_000 picoseconds. 
- Weight::from_parts(676_799_000, 9623) - // Standard Error: 280_583 - .saturating_add(Weight::from_parts(372_936_154, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) - .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(7_u64)) - .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2731).saturating_mul(r.into())) + // Measured: `1097 + r * (188 ±0)` + // Estimated: `6990 + r * (2664 ±0)` + // Minimum execution time: 341_569_000 picoseconds. + Weight::from_parts(360_574_000, 6990) + // Standard Error: 259_746 + .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. 
 fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1306 + t * (104 ±0)`
- // Estimated: `12214 + t * (2549 ±1)`
- // Minimum execution time: 2_217_563_000 picoseconds.
- Weight::from_parts(1_188_285_504, 12214)
- // Standard Error: 12_397_366
- .saturating_add(Weight::from_parts(10_833_274, 0).saturating_mul(t.into()))
- // Standard Error: 19
- .saturating_add(Weight::from_parts(1_084, 0).saturating_mul(i.into()))
- // Standard Error: 19
- .saturating_add(Weight::from_parts(1_238, 0).saturating_mul(s.into()))
- .saturating_add(T::DbWeight::get().reads(19_u64))
+ // Measured: `760 + t * (104 ±0)`
+ // Estimated: `6719 + t * (2549 ±1)`
+ // Minimum execution time: 1_863_119_000 picoseconds.
+ Weight::from_parts(900_189_174, 6719)
+ // Standard Error: 13_040_979
+ .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into()))
+ // Standard Error: 20
+ .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into()))
+ // Standard Error: 20
+ .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(10_u64))
 .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into())))
- .saturating_add(T::DbWeight::get().writes(11_u64))
+ .saturating_add(T::DbWeight::get().writes(7_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into())))
 .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_sha2_256(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `865 + r * (8 ±0)`
- // Estimated: `9279 + r * (8 ±0)`
- // Minimum execution time: 274_367_000 picoseconds.
- Weight::from_parts(294_958_322, 9279)
- // Standard Error: 1_239
- .saturating_add(Weight::from_parts(388_976, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_211_000 picoseconds.
+ Weight::from_parts(11_696_412, 0)
+ // Standard Error: 388
+ .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `873`
- // Estimated: `9286`
- // Minimum execution time: 260_947_000 picoseconds.
- Weight::from_parts(271_974_409, 9286)
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 10_296_000 picoseconds.
+ Weight::from_parts(572_494, 0)
 // Standard Error: 1
- .saturating_add(Weight::from_parts(1_087, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
+ .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_keccak_256(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `867 + r * (8 ±0)`
- // Estimated: `9284 + r * (8 ±0)`
- // Minimum execution time: 256_505_000 picoseconds.
- Weight::from_parts(288_574_804, 9284)
- // Standard Error: 1_115
- .saturating_add(Weight::from_parts(787_123, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_177_000 picoseconds.
+ Weight::from_parts(8_620_481, 0)
+ // Standard Error: 249
+ .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `875`
- // Estimated: `9292`
- // Minimum execution time: 261_657_000 picoseconds.
- Weight::from_parts(283_908_184, 9292)
- // Standard Error: 1
- .saturating_add(Weight::from_parts(3_345, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 11_240_000 picoseconds.
+ Weight::from_parts(8_696_186, 0)
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_blake2_256(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `867 + r * (8 ±0)`
- // Estimated: `9286 + r * (8 ±0)`
- // Minimum execution time: 262_311_000 picoseconds.
- Weight::from_parts(295_454_976, 9286)
- // Standard Error: 558
- .saturating_add(Weight::from_parts(434_922, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_889_000 picoseconds.
+ Weight::from_parts(16_103_170, 0)
+ // Standard Error: 343
+ .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `875`
- // Estimated: `9291`
- // Minimum execution time: 269_002_000 picoseconds.
- Weight::from_parts(280_531_070, 9291)
- // Standard Error: 1
- .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 10_405_000 picoseconds.
+ Weight::from_parts(2_264_024, 0)
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_blake2_128(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `867 + r * (8 ±0)`
- // Estimated: `9283 + r * (8 ±0)`
- // Minimum execution time: 259_660_000 picoseconds.
- Weight::from_parts(287_625_483, 9283)
- // Standard Error: 524
- .saturating_add(Weight::from_parts(456_200, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_215_000 picoseconds.
+ Weight::from_parts(10_505_632, 0)
+ // Standard Error: 240
+ .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `875`
- // Estimated: `9289`
- // Minimum execution time: 263_305_000 picoseconds.
- Weight::from_parts(278_483_877, 9289)
- // Standard Error: 0
- .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `n` is `[0, 125697]`.
- fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `1000 + n * (1 ±0)`
- // Estimated: `9412 + n * (1 ±0)`
- // Minimum execution time: 329_511_000 picoseconds.
- Weight::from_parts(341_570_880, 9412)
- // Standard Error: 11
- .saturating_add(Weight::from_parts(5_914, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
- }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 160]`.
- fn seal_sr25519_verify(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `808 + r * (112 ±0)`
- // Estimated: `9226 + r * (112 ±0)`
- // Minimum execution time: 269_103_000 picoseconds.
- Weight::from_parts(317_360_842, 9226)
- // Standard Error: 16_463
- .saturating_add(Weight::from_parts(45_965_726, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 10_440_000 picoseconds.
+ Weight::from_parts(2_575_889, 0)
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into()))
+ }
+ /// The range of component `n` is `[0, 125697]`.
+ fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 55_119_000 picoseconds.
+ Weight::from_parts(56_732_248, 0)
+ // Standard Error: 8
+ .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into()))
+ }
+ /// The range of component `r` is `[0, 160]`.
+ fn seal_sr25519_verify(r: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_176_000 picoseconds.
+ Weight::from_parts(9_861_102, 0)
+ // Standard Error: 6_029
+ .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into()))
+ }
 /// The range of component `r` is `[0, 160]`.
 fn seal_ecdsa_recover(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `910 + r * (76 ±0)`
- // Estimated: `9279 + r * (77 ±0)`
- // Minimum execution time: 281_685_000 picoseconds.
- Weight::from_parts(339_617_056, 9279)
- // Standard Error: 15_672
- .saturating_add(Weight::from_parts(45_907_026, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_293_000 picoseconds.
+ Weight::from_parts(28_785_765, 0)
+ // Standard Error: 9_160
+ .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 160]`.
 fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `880 + r * (42 ±0)`
- // Estimated: `9294 + r * (42 ±0)`
- // Minimum execution time: 265_009_000 picoseconds.
- Weight::from_parts(304_895_744, 9294)
- // Standard Error: 7_640
- .saturating_add(Weight::from_parts(12_117_411, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_206_000 picoseconds.
+ Weight::from_parts(12_420_664, 0)
+ // Standard Error: 3_489
+ .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1536 w:0)
+ /// Storage: `Contracts::PristineCode` (r:1535 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:1538 w:1538)
+ /// Storage: `Parameters::Parameters` (r:2 w:0)
+ /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
+ /// Storage: `System::EventTopics` (r:1537 w:1537)
 /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_set_code_hash(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0 + r * (965 ±0)`
- // Estimated: `9285 + r * (3090 ±7)`
- // Minimum execution time: 281_110_000 picoseconds.
- Weight::from_parts(283_554_000, 9285)
- // Standard Error: 47_136
- .saturating_add(Weight::from_parts(27_448_052, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
+ // Measured: `0 + r * (926 ±0)`
+ // Estimated: `8969 + r * (3047 ±7)`
+ // Minimum execution time: 9_219_000 picoseconds.
+ Weight::from_parts(9_385_000, 8969)
+ // Standard Error: 45_562
+ .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into()))
 .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:33 w:32)
+ /// Storage: `Contracts::CodeInfoOf` (r:32 w:32)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 32]`.
 fn lock_delegate_dependency(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `937 + r * (131 ±0)`
- // Estimated: `9346 + r * (2607 ±0)`
- // Minimum execution time: 274_312_000 picoseconds.
- Weight::from_parts(297_853_480, 9346)
- // Standard Error: 31_172
- .saturating_add(Weight::from_parts(6_829_169, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
+ // Measured: `274 + r * (78 ±0)`
+ // Estimated: `1265 + r * (2553 ±0)`
+ // Minimum execution time: 9_355_000 picoseconds.
+ Weight::from_parts(15_071_309, 1265)
+ // Standard Error: 9_722
+ .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into()))
 .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`)
- /// Storage: `Contracts::CodeInfoOf` (r:33 w:32)
+ /// Storage: `Contracts::CodeInfoOf` (r:32 w:32)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 32]`.
 fn unlock_delegate_dependency(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `972 + r * (184 ±0)`
- // Estimated: `129453 + r * (2568 ±0)`
- // Minimum execution time: 271_607_000 picoseconds.
- Weight::from_parts(299_008_266, 129453)
- // Standard Error: 25_085
- .saturating_add(Weight::from_parts(5_828_791, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
+ // Measured: `275 + r * (78 ±0)`
+ // Estimated: `990 + r * (2568 ±0)`
+ // Minimum execution time: 8_979_000 picoseconds.
+ Weight::from_parts(14_362_224, 990)
+ // Standard Error: 9_137
+ .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into()))
 .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
 .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into()))
 }
@@ -2084,83 +1374,46 @@ impl WeightInfo for SubstrateWeight {
 // Proof Size summary in bytes:
 // Measured: `861 + r * (3 ±0)`
 // Estimated: `9282 + r * (3 ±0)`
- // Minimum execution time: 275_135_000 picoseconds.
- Weight::from_parts(289_363_447, 9282)
- // Standard Error: 501
- .saturating_add(Weight::from_parts(171_024, 0).saturating_mul(r.into()))
+ // Minimum execution time: 269_704_000 picoseconds.
+ Weight::from_parts(289_916_035, 9282)
+ // Standard Error: 408
+ .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into()))
 .saturating_add(T::DbWeight::get().reads(11_u64))
 .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_account_reentrance_count(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `2112 + r * (39 ±0)`
- // Estimated: `10377 + r * (40 ±0)`
- // Minimum execution time: 279_752_000 picoseconds.
- Weight::from_parts(322_774_890, 10377)
- // Standard Error: 867
- .saturating_add(Weight::from_parts(262_474, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(11_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_361_000 picoseconds.
+ Weight::from_parts(11_633_836, 0)
+ // Standard Error: 86
+ .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `Contracts::Nonce` (r:1 w:1)
+ /// Storage: `Contracts::Nonce` (r:1 w:0)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_instantiation_nonce(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `864 + r * (3 ±0)`
- // Estimated: `9279 + r * (3 ±0)`
- // Minimum execution time: 263_398_000 picoseconds.
- Weight::from_parts(291_372_000, 9279)
- // Standard Error: 448
- .saturating_add(Weight::from_parts(151_931, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(12_u64))
- .saturating_add(T::DbWeight::get().writes(4_u64))
- .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into()))
+ // Measured: `219`
+ // Estimated: `1704`
+ // Minimum execution time: 9_133_000 picoseconds.
+ Weight::from_parts(13_259_836, 1704)
+ // Standard Error: 121
+ .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads(1_u64))
 }
 /// The range of component `r` is `[0, 5000]`.
 fn instr_i64_load_store(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 838_000 picoseconds.
- Weight::from_parts(670_057, 0)
- // Standard Error: 17
- .saturating_add(Weight::from_parts(15_037, 0).saturating_mul(r.into()))
+ // Minimum execution time: 851_000 picoseconds.
+ Weight::from_parts(587_883, 0)
+ // Standard Error: 16
+ .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into()))
 }
 }
@@ -2172,8 +1425,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `1627`
- // Minimum execution time: 2_140_000 picoseconds.
- Weight::from_parts(2_243_000, 1627)
+ // Minimum execution time: 2_149_000 picoseconds.
+ Weight::from_parts(2_274_000, 1627)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
@@ -2183,10 +1436,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `452 + k * (69 ±0)`
 // Estimated: `442 + k * (70 ±0)`
- // Minimum execution time: 12_655_000 picoseconds.
- Weight::from_parts(12_863_000, 442)
- // Standard Error: 1_147
- .saturating_add(Weight::from_parts(1_097_546, 0).saturating_mul(k.into()))
+ // Minimum execution time: 12_863_000 picoseconds.
+ Weight::from_parts(13_188_000, 442)
+ // Standard Error: 1_053
+ .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into())))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
@@ -2200,10 +1453,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `211 + c * (1 ±0)`
 // Estimated: `6149 + c * (1 ±0)`
- // Minimum execution time: 8_626_000 picoseconds.
- Weight::from_parts(8_805_071, 6149)
+ // Minimum execution time: 8_432_000 picoseconds.
+ Weight::from_parts(9_203_290, 6149)
 // Standard Error: 1
- .saturating_add(Weight::from_parts(1_188, 0).saturating_mul(c.into()))
+ .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
@@ -2216,8 +1469,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `510`
 // Estimated: `6450`
- // Minimum execution time: 16_748_000 picoseconds.
- Weight::from_parts(17_483_000, 6450)
+ // Minimum execution time: 17_177_000 picoseconds.
+ Weight::from_parts(17_663_000, 6450)
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -2230,10 +1483,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `171 + k * (1 ±0)`
 // Estimated: `3635 + k * (1 ±0)`
- // Minimum execution time: 3_632_000 picoseconds.
- Weight::from_parts(3_706_000, 3635)
- // Standard Error: 903
- .saturating_add(Weight::from_parts(1_185_348, 0).saturating_mul(k.into()))
+ // Minimum execution time: 3_636_000 picoseconds.
+ Weight::from_parts(3_774_000, 3635)
+ // Standard Error: 542
+ .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into())))
@@ -2254,10 +1507,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `328 + c * (1 ±0)`
 // Estimated: `6266 + c * (1 ±0)`
- // Minimum execution time: 21_630_000 picoseconds.
- Weight::from_parts(20_845_294, 6266)
- // Standard Error: 2
- .saturating_add(Weight::from_parts(421, 0).saturating_mul(c.into()))
+ // Minimum execution time: 21_585_000 picoseconds.
+ Weight::from_parts(22_069_944, 6266)
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(6_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
@@ -2268,8 +1521,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `440`
 // Estimated: `6380`
- // Minimum execution time: 13_178_000 picoseconds.
- Weight::from_parts(13_671_000, 6380)
+ // Minimum execution time: 13_283_000 picoseconds.
+ Weight::from_parts(14_015_000, 6380)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -2283,8 +1536,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `352`
 // Estimated: `6292`
- // Minimum execution time: 45_828_000 picoseconds.
- Weight::from_parts(46_476_000, 6292)
+ // Minimum execution time: 48_022_000 picoseconds.
+ Weight::from_parts(49_627_000, 6292)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -2296,19 +1549,30 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `594`
 // Estimated: `6534`
- // Minimum execution time: 54_767_000 picoseconds.
- Weight::from_parts(56_879_000, 6534)
+ // Minimum execution time: 58_374_000 picoseconds.
+ Weight::from_parts(59_615_000, 6534)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
+ /// Storage: `Contracts::ContractInfoOf` (r:2 w:1)
+ /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
+ fn v16_migration_step() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `409`
+ // Estimated: `6349`
+ // Minimum execution time: 12_559_000 picoseconds.
+ Weight::from_parts(12_947_000, 6349)
+ .saturating_add(RocksDbWeight::get().reads(2_u64))
+ .saturating_add(RocksDbWeight::get().writes(1_u64))
+ }
 /// Storage: `Contracts::MigrationInProgress` (r:1 w:1)
 /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
 fn migration_noop() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `1627`
- // Minimum execution time: 2_599_000 picoseconds.
- Weight::from_parts(2_794_000, 1627)
+ // Minimum execution time: 2_480_000 picoseconds.
+ Weight::from_parts(2_680_000, 1627)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -2320,8 +1584,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `166`
 // Estimated: `3631`
- // Minimum execution time: 10_275_000 picoseconds.
- Weight::from_parts(10_709_000, 3631)
+ // Minimum execution time: 12_625_000 picoseconds.
+ Weight::from_parts(13_094_000, 3631)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
@@ -2331,8 +1595,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `3607`
- // Minimum execution time: 3_813_000 picoseconds.
- Weight::from_parts(4_033_000, 3607)
+ // Minimum execution time: 4_836_000 picoseconds.
+ Weight::from_parts(5_182_000, 3607)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
 /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0)
@@ -2343,8 +1607,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `167`
 // Estimated: `3632`
- // Minimum execution time: 5_160_000 picoseconds.
- Weight::from_parts(5_478_000, 3632)
+ // Minimum execution time: 6_319_000 picoseconds.
+ Weight::from_parts(6_582_000, 3632)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 }
 /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0)
@@ -2355,8 +1619,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `3607`
- // Minimum execution time: 5_178_000 picoseconds.
- Weight::from_parts(5_540_000, 3607)
+ // Minimum execution time: 6_532_000 picoseconds.
+ Weight::from_parts(6_909_000, 3607)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -2381,10 +1645,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `804 + c * (1 ±0)`
 // Estimated: `9217 + c * (1 ±0)`
- // Minimum execution time: 295_545_000 picoseconds.
- Weight::from_parts(291_859_570, 9217)
- // Standard Error: 73
- .saturating_add(Weight::from_parts(33_546, 0).saturating_mul(c.into()))
+ // Minimum execution time: 305_778_000 picoseconds.
+ Weight::from_parts(282_321_249, 9217)
+ // Standard Error: 72
+ .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(11_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
@@ -2416,14 +1680,14 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `326`
 // Estimated: `8740`
- // Minimum execution time: 3_796_729_000 picoseconds.
- Weight::from_parts(808_328_941, 8740)
- // Standard Error: 156
- .saturating_add(Weight::from_parts(99_863, 0).saturating_mul(c.into()))
- // Standard Error: 18
- .saturating_add(Weight::from_parts(1_433, 0).saturating_mul(i.into()))
- // Standard Error: 18
- .saturating_add(Weight::from_parts(1_501, 0).saturating_mul(s.into()))
+ // Minimum execution time: 3_810_809_000 picoseconds.
+ Weight::from_parts(739_511_598, 8740)
+ // Standard Error: 140
+ .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into()))
+ // Standard Error: 16
+ .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into()))
+ // Standard Error: 16
+ .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into()))
 .saturating_add(RocksDbWeight::get().reads(14_u64))
 .saturating_add(RocksDbWeight::get().writes(10_u64))
 }
@@ -2453,12 +1717,12 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `563`
 // Estimated: `8982`
- // Minimum execution time: 2_004_502_000 picoseconds.
- Weight::from_parts(2_013_544_000, 8982)
+ // Minimum execution time: 1_986_789_000 picoseconds.
+ Weight::from_parts(2_017_466_000, 8982)
 // Standard Error: 26
- .saturating_add(Weight::from_parts(807, 0).saturating_mul(i.into()))
+ .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into()))
 // Standard Error: 26
- .saturating_add(Weight::from_parts(784, 0).saturating_mul(s.into()))
+ .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into()))
 .saturating_add(RocksDbWeight::get().reads(13_u64))
 .saturating_add(RocksDbWeight::get().writes(7_u64))
 }
@@ -2482,8 +1746,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `829`
 // Estimated: `9244`
- // Minimum execution time: 213_568_000 picoseconds.
- Weight::from_parts(219_796_000, 9244)
+ // Minimum execution time: 210_724_000 picoseconds.
+ Weight::from_parts(218_608_000, 9244)
 .saturating_add(RocksDbWeight::get().reads(11_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
@@ -2504,10 +1768,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `6085`
- // Minimum execution time: 290_835_000 picoseconds.
- Weight::from_parts(396_613_751, 6085)
- // Standard Error: 160
- .saturating_add(Weight::from_parts(63_993, 0).saturating_mul(c.into()))
+ // Minimum execution time: 271_259_000 picoseconds.
+ Weight::from_parts(298_852_854, 6085)
+ // Standard Error: 65
+ .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(6_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
@@ -2528,10 +1792,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `145`
 // Estimated: `6085`
- // Minimum execution time: 302_249_000 picoseconds.
- Weight::from_parts(312_519_255, 6085)
- // Standard Error: 90
- .saturating_add(Weight::from_parts(64_908, 0).saturating_mul(c.into()))
+ // Minimum execution time: 278_167_000 picoseconds.
+ Weight::from_parts(311_888_941, 6085)
+ // Standard Error: 58
+ .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(6_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
@@ -2549,8 +1813,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `315`
 // Estimated: `3780`
- // Minimum execution time: 46_580_000 picoseconds.
- Weight::from_parts(47_676_000, 3780)
+ // Minimum execution time: 47_403_000 picoseconds.
+ Weight::from_parts(48_707_000, 3780)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
@@ -2566,447 +1830,174 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `552`
 // Estimated: `8967`
- // Minimum execution time: 35_454_000 picoseconds.
- Weight::from_parts(36_956_000, 8967)
+ // Minimum execution time: 35_361_000 picoseconds.
+ Weight::from_parts(36_714_000, 8967)
 .saturating_add(RocksDbWeight::get().reads(7_u64))
 .saturating_add(RocksDbWeight::get().writes(6_u64))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_caller(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `869 + r * (6 ±0)`
- // Estimated: `9284 + r * (6 ±0)`
- // Minimum execution time: 265_797_000 picoseconds.
- Weight::from_parts(305_958_124, 9284)
- // Standard Error: 3_049
- .saturating_add(Weight::from_parts(343_670, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(11_u64))
- .saturating_add(RocksDbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into()))
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 9_340_000 picoseconds.
+ Weight::from_parts(9_360_237, 0)
+ // Standard Error: 269
+ .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1)
+ /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_is_contract(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `925 + r * (209 ±0)`
- // Estimated: `9304 + r * (2684 ±0)`
- // Minimum execution time: 273_327_000 picoseconds.
- Weight::from_parts(143_036_031, 9304)
- // Standard Error: 5_612
- .saturating_add(Weight::from_parts(3_745_297, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(11_u64))
+ // Measured: `509 + r * (77 ±0)`
+ // Estimated: `1467 + r * (2552 ±0)`
+ // Minimum execution time: 9_059_000 picoseconds.
+ Weight::from_parts(9_201_000, 1467)
+ // Standard Error: 5_643
+ .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into()))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1)
+ /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_code_hash(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `924 + r * (213 ±0)`
- // Estimated: `9308 + r * (2688 ±0)`
- // Minimum execution time: 279_590_000 picoseconds.
- Weight::from_parts(147_961_398, 9308)
- // Standard Error: 6_198
- .saturating_add(Weight::from_parts(4_677_881, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(11_u64))
+ // Measured: `517 + r * (170 ±0)`
+ // Estimated: `1468 + r * (2645 ±0)`
+ // Minimum execution time: 9_220_000 picoseconds.
+ Weight::from_parts(9_399_000, 1468) + // Standard Error: 6_194 + .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `876 + r * (6 ±0)` - // Estimated: `9293 + r * (6 ±0)` - // Minimum execution time: 277_334_000 picoseconds. - Weight::from_parts(285_263_644, 9293) - // Standard Error: 771 - .saturating_add(Weight::from_parts(450_538, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_707_000 picoseconds. 
+ Weight::from_parts(10_100_456, 0) + // Standard Error: 234 + .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (3 ±0)` - // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 262_895_000 picoseconds. - Weight::from_parts(290_274_813, 9282) - // Standard Error: 356 - .saturating_add(Weight::from_parts(172_585, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_524_000 picoseconds. + Weight::from_parts(10_813_389, 0) + // Standard Error: 76 + .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. 
fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `756 + r * (3 ±0)` - // Estimated: `9171 + r * (3 ±0)` - // Minimum execution time: 261_701_000 picoseconds. - Weight::from_parts(279_765_708, 9171) - // Standard Error: 451 - .saturating_add(Weight::from_parts(158_243, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_799_000 picoseconds. + Weight::from_parts(10_886_744, 0) + // Standard Error: 75 + .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) + } /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870 + r * (6 ±0)` - // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 284_209_000 picoseconds. - Weight::from_parts(294_215_782, 9285) - // Standard Error: 703 - .saturating_add(Weight::from_parts(344_236, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_895_000 picoseconds. 
+ Weight::from_parts(10_658_338, 0) + // Standard Error: 189 + .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `9284 + r * (6 ±0)` - // Minimum execution time: 277_126_000 picoseconds. - Weight::from_parts(292_436_333, 9284) - // Standard Error: 1_215 - .saturating_add(Weight::from_parts(380_107, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_643_000 picoseconds. 
+ Weight::from_parts(10_932_126, 0) + // Standard Error: 153 + .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:0) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1010 + r * (6 ±0)` - // Estimated: `9409 + r * (6 ±0)` - // Minimum execution time: 266_377_000 picoseconds. - Weight::from_parts(295_163_193, 9409) - // Standard Error: 4_026 - .saturating_add(Weight::from_parts(1_859_387, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `140` + // Estimated: `3599` + // Minimum execution time: 9_548_000 picoseconds. 
+ Weight::from_parts(9_737_000, 3599) + // Standard Error: 971 + .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + r * (6 ±0)` - // Estimated: `9301 + r * (6 ±0)` - // Minimum execution time: 276_990_000 picoseconds. - Weight::from_parts(296_463_738, 9301) - // Standard Error: 655 - .saturating_add(Weight::from_parts(335_070, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_172_000 picoseconds. 
+ Weight::from_parts(18_255_933, 0) + // Standard Error: 540 + .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `878 + r * (6 ±0)` - // Estimated: `9294 + r * (6 ±0)` - // Minimum execution time: 274_845_000 picoseconds. - Weight::from_parts(294_870_901, 9294) - // Standard Error: 793 - .saturating_add(Weight::from_parts(336_049, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_232_000 picoseconds. + Weight::from_parts(9_796_584, 0) + // Standard Error: 208 + .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. 
fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `9297 + r * (6 ±0)` - // Minimum execution time: 277_792_000 picoseconds. - Weight::from_parts(290_529_469, 9297) - // Standard Error: 1_191 - .saturating_add(Weight::from_parts(339_291, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_747_000 picoseconds. + Weight::from_parts(8_733_230, 0) + // Standard Error: 377 + .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `9282 + r * (6 ±0)` - // Minimum execution time: 271_060_000 picoseconds. - Weight::from_parts(287_512_151, 9282) - // Standard Error: 859 - .saturating_add(Weight::from_parts(345_414, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_214_000 picoseconds. 
+ Weight::from_parts(10_194_153, 0) + // Standard Error: 516 + .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `940 + r * (14 ±0)` - // Estimated: `9350 + r * (14 ±0)` - // Minimum execution time: 265_401_000 picoseconds. - Weight::from_parts(303_350_793, 9350) - // Standard Error: 1_995 - .saturating_add(Weight::from_parts(885_253, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) + // Measured: `67` + // Estimated: `1552` + // Minimum execution time: 9_022_000 picoseconds. 
+ Weight::from_parts(22_051_160, 1552) + // Standard Error: 697 + .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `868 + r * (6 ±0)` - // Estimated: `9285 + r * (6 ±0)` - // Minimum execution time: 264_474_000 picoseconds. - Weight::from_parts(289_972_634, 9285) - // Standard Error: 719 - .saturating_add(Weight::from_parts(269_856, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_135_000 picoseconds. + Weight::from_parts(10_646_215, 0) + // Standard Error: 161 + .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -3029,67 +2020,32 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 266_661_000 picoseconds. - Weight::from_parts(147_923_867, 9287) + // Minimum execution time: 273_896_000 picoseconds. 
+ Weight::from_parts(148_309_654, 9287) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_365, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `856 + r * (45 ±0)` - // Estimated: `9271 + r * (45 ±0)` - // Minimum execution time: 257_851_000 picoseconds. - Weight::from_parts(285_687_679, 9271) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_906_000 picoseconds. 
+ Weight::from_parts(9_264_446, 0) + // Standard Error: 19_760 + .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866` - // Estimated: `9288` - // Minimum execution time: 283_478_000 picoseconds. - Weight::from_parts(290_827_773, 9288) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_266_000 picoseconds. + Weight::from_parts(10_602_261, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(319, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -3118,211 +2074,119 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4805 + r * (2121 ±0)` // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 298_477_000 picoseconds. - Weight::from_parts(325_394_306, 13220) - // Standard Error: 873_781 - .saturating_add(Weight::from_parts(255_748_093, 0).saturating_mul(r.into())) + // Minimum execution time: 295_922_000 picoseconds. 
+ Weight::from_parts(322_472_877, 13220) + // Standard Error: 993_812 + .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((41_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `947 + r * (10 ±0)` - // Estimated: `9363 + r * (10 ±0)` - // Minimum execution time: 260_352_000 picoseconds. - Weight::from_parts(287_284_570, 9363) - // Standard Error: 4_051 - .saturating_add(Weight::from_parts(1_350_197, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 9_427_000 picoseconds. 
+ Weight::from_parts(12_996_213, 1561) + // Standard Error: 845 + .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. - fn seal_deposit_event(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `866 + r * (10 ±0)` - // Estimated: `9283 + r * (10 ±0)` - // Minimum execution time: 277_214_000 picoseconds. - Weight::from_parts(295_852_897, 9283) - // Standard Error: 1_554 - .saturating_add(Weight::from_parts(2_120_577, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:6 w:6) + fn seal_deposit_event(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_304_000 picoseconds. 
+ Weight::from_parts(25_678_842, 0) + // Standard Error: 1_855 + .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) + } + /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `883 + t * (32 ±0)` - // Estimated: `9303 + t * (2508 ±0)` - // Minimum execution time: 282_107_000 picoseconds. - Weight::from_parts(300_658_543, 9303) - // Standard Error: 97_515 - .saturating_add(Weight::from_parts(1_999_680, 0).saturating_mul(t.into())) - // Standard Error: 27 - .saturating_add(Weight::from_parts(417, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `0` + // Estimated: `990 + t * (2475 ±0)` + // Minimum execution time: 23_425_000 picoseconds. + Weight::from_parts(15_229_010, 990) + // Standard Error: 14_380 + .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2508).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (7 ±0)` - // Estimated: `9285 + r * (7 ±0)` - // Minimum execution time: 170_105_000 picoseconds. - Weight::from_parts(185_260_479, 9285) - // Standard Error: 451 - .saturating_add(Weight::from_parts(227_483, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_117_000 picoseconds. 
+ Weight::from_parts(12_887_533, 0) + // Standard Error: 83 + .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125816` - // Estimated: `131758` - // Minimum execution time: 415_029_000 picoseconds. - Weight::from_parts(398_551_260, 131758) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_027, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_982_000 picoseconds. + Weight::from_parts(11_176_000, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `927 + r * (292 ±0)` - // Estimated: `929 + r * (293 ±0)` - // Minimum execution time: 279_920_000 picoseconds. - Weight::from_parts(190_991_719, 929) - // Standard Error: 10_104 - .saturating_add(Weight::from_parts(6_458_687, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_150_000 picoseconds. + Weight::from_parts(9_269_000, 105) + // Standard Error: 8_147 + .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 293).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1450` - // Estimated: `1433` - // Minimum execution time: 294_847_000 picoseconds. - Weight::from_parts(350_895_957, 1433) - // Standard Error: 80 - .saturating_add(Weight::from_parts(676, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(15_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Measured: `245` + // Estimated: `245` + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_007_323, 245) + // Standard Error: 3 + .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1256 + n * (1 ±0)` - // Estimated: `1256 + n * (1 ±0)` - // Minimum execution time: 280_398_000 picoseconds. - Weight::from_parts(306_915_984, 1256) - // Standard Error: 36 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 19_127_000 picoseconds. + Weight::from_parts(21_152_987, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3330,31 +2194,29 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (288 ±0)` - // Estimated: `930 + r * (289 ±0)` - // Minimum execution time: 269_338_000 picoseconds. - Weight::from_parts(192_925_453, 930) - // Standard Error: 9_912 - .saturating_add(Weight::from_parts(6_299_237, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_264_000 picoseconds. + Weight::from_parts(9_449_000, 105) + // Standard Error: 8_196 + .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 281_305_000 picoseconds. 
- Weight::from_parts(305_828_464, 1252) - // Standard Error: 34 - .saturating_add(Weight::from_parts(490, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 18_489_000 picoseconds. + Weight::from_parts(19_916_153, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3362,30 +2224,27 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (296 ±0)` - // Estimated: `926 + r * (297 ±0)` - // Minimum execution time: 267_838_000 picoseconds. - Weight::from_parts(224_154_959, 926) - // Standard Error: 7_904 - .saturating_add(Weight::from_parts(5_123_059, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_464_000, 105) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1268 + n * (1 ±0)` - // Estimated: `1268 + n * (1 ±0)` - // Minimum execution time: 280_093_000 picoseconds. - Weight::from_parts(304_698_374, 1268) - // Standard Error: 46 - .saturating_add(Weight::from_parts(579, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 17_981_000 picoseconds. + Weight::from_parts(19_802_353, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3393,30 +2252,27 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `935 + r * (288 ±0)` - // Estimated: `932 + r * (289 ±0)` - // Minimum execution time: 263_759_000 picoseconds. - Weight::from_parts(214_010_246, 932) - // Standard Error: 8_052 - .saturating_add(Weight::from_parts(4_953_264, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_891_000 picoseconds. 
+ Weight::from_parts(10_046_000, 105) + // Standard Error: 6_993 + .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1255 + n * (1 ±0)` - // Estimated: `1255 + n * (1 ±0)` - // Minimum execution time: 273_652_000 picoseconds. - Weight::from_parts(299_141_902, 1255) - // Standard Error: 38 - .saturating_add(Weight::from_parts(337, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 17_229_000 picoseconds. + Weight::from_parts(18_302_733, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3424,670 +2280,358 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `917 + r * (296 ±0)` - // Estimated: `922 + r * (297 ±0)` - // Minimum execution time: 273_392_000 picoseconds. - Weight::from_parts(192_725_781, 922) - // Standard Error: 10_264 - .saturating_add(Weight::from_parts(6_353_931, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `108 + r * (150 ±0)` + // Estimated: `105 + r * (151 ±0)` + // Minimum execution time: 9_323_000 picoseconds. + Weight::from_parts(9_462_000, 105) + // Standard Error: 8_031 + .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1269 + n * (1 ±0)` - // Estimated: `1269 + n * (1 ±0)` - // Minimum execution time: 284_546_000 picoseconds. - Weight::from_parts(309_720_024, 1269) - // Standard Error: 33 - .saturating_add(Weight::from_parts(664, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `248 + n * (1 ±0)` + // Estimated: `248 + n * (1 ±0)` + // Minimum execution time: 18_711_000 picoseconds. 
+ Weight::from_parts(20_495_670, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1602 w:1601) + /// Storage: `System::Account` (r:1601 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1418 + r * (45 ±0)` - // Estimated: `9785 + r * (2520 ±0)` - // Minimum execution time: 280_447_000 picoseconds. - Weight::from_parts(354_702_861, 9785) - // Standard Error: 42_509 - .saturating_add(Weight::from_parts(34_678_454, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `770` + // Estimated: `4221 + r * (2475 ±0)` + // Minimum execution time: 9_226_000 picoseconds. 
+ Weight::from_parts(9_394_000, 4221) + // Standard Error: 14_741 + .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2520).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) + /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:803 w:803) + /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1263 + r * (245 ±0)` - // Estimated: `9635 + r * (2721 ±0)` - // Minimum execution time: 279_400_000 picoseconds. - Weight::from_parts(282_198_000, 9635) - // Standard Error: 109_250 - .saturating_add(Weight::from_parts(246_481_216, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) + // Measured: `520 + r * (170 ±0)` + // Estimated: `6463 + r * (2646 ±0)` + // Minimum execution time: 9_455_000 picoseconds. 
+ Weight::from_parts(9_671_000, 6463) + // Standard Error: 126_080 + .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2721).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:735 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:736 w:0) + /// Storage: `Contracts::PristineCode` (r:735 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:737 w:737) + /// Storage: `System::EventTopics` (r:736 w:736) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (576 ±0)` - // Estimated: `9290 + r * (2637 ±3)` - // Minimum execution time: 276_509_000 picoseconds. - Weight::from_parts(281_555_000, 9290) - // Standard Error: 133_738 - .saturating_add(Weight::from_parts(244_671_777, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `0 + r * (527 ±0)` + // Estimated: `6447 + r * (2583 ±10)` + // Minimum execution time: 9_274_000 picoseconds. 
+ Weight::from_parts(9_437_000, 6447) + // Standard Error: 150_832 + .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2637).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1310 + t * (277 ±0)` - // Estimated: `12200 + t * (5227 ±0)` - // Minimum execution time: 464_343_000 picoseconds. - Weight::from_parts(485_002_000, 12200) - // Standard Error: 7_933_446 - .saturating_add(Weight::from_parts(172_853_968, 0).saturating_mul(t.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(775, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Measured: `699 + t * (277 ±0)` + // Estimated: `6639 + t * (3458 ±0)` + // Minimum execution time: 214_483_000 picoseconds. 
+ Weight::from_parts(122_634_366, 6639) + // Standard Error: 2_499_235 + .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 5227).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:802 w:802) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) + /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:801 w:0) + /// Storage: `Contracts::PristineCode` (r:800 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:803 w:803) + /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `System::Account` (r:802 w:802) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1281 + r * (255 ±0)` - // Estimated: `9623 + r * (2731 ±0)` - // Minimum execution time: 661_757_000 picoseconds. 
- Weight::from_parts(676_799_000, 9623) - // Standard Error: 280_583 - .saturating_add(Weight::from_parts(372_936_154, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) - .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(7_u64)) - .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2731).saturating_mul(r.into())) + // Measured: `1097 + r * (188 ±0)` + // Estimated: `6990 + r * (2664 ±0)` + // Minimum execution time: 341_569_000 picoseconds. + Weight::from_parts(360_574_000, 6990) + // Standard Error: 259_746 + .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:2 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) + /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. 
fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1306 + t * (104 ±0)` - // Estimated: `12214 + t * (2549 ±1)` - // Minimum execution time: 2_217_563_000 picoseconds. - Weight::from_parts(1_188_285_504, 12214) - // Standard Error: 12_397_366 - .saturating_add(Weight::from_parts(10_833_274, 0).saturating_mul(t.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_084, 0).saturating_mul(i.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_238, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(19_u64)) + // Measured: `760 + t * (104 ±0)` + // Estimated: `6719 + t * (2549 ±1)` + // Minimum execution time: 1_863_119_000 picoseconds. + Weight::from_parts(900_189_174, 6719) + // Standard Error: 13_040_979 + .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(11_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (8 ±0)` - // Estimated: `9279 + r * (8 ±0)` - // Minimum execution time: 274_367_000 picoseconds. - Weight::from_parts(294_958_322, 9279) - // Standard Error: 1_239 - .saturating_add(Weight::from_parts(388_976, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_211_000 picoseconds. 
+ Weight::from_parts(11_696_412, 0) + // Standard Error: 388 + .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873` - // Estimated: `9286` - // Minimum execution time: 260_947_000 picoseconds. - Weight::from_parts(271_974_409, 9286) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_296_000 picoseconds. + Weight::from_parts(572_494, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_087, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. 
fn seal_hash_keccak_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `867 + r * (8 ±0)` - // Estimated: `9284 + r * (8 ±0)` - // Minimum execution time: 256_505_000 picoseconds. - Weight::from_parts(288_574_804, 9284) - // Standard Error: 1_115 - .saturating_add(Weight::from_parts(787_123, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_177_000 picoseconds. + Weight::from_parts(8_620_481, 0) + // Standard Error: 249 + .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) + } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875` - // Estimated: `9292` - // Minimum execution time: 261_657_000 picoseconds. - Weight::from_parts(283_908_184, 9292) - // Standard Error: 1 - .saturating_add(Weight::from_parts(3_345, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_240_000 picoseconds. 
+ Weight::from_parts(8_696_186, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (8 ±0)` - // Estimated: `9286 + r * (8 ±0)` - // Minimum execution time: 262_311_000 picoseconds. - Weight::from_parts(295_454_976, 9286) - // Standard Error: 558 - .saturating_add(Weight::from_parts(434_922, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_889_000 picoseconds. + Weight::from_parts(16_103_170, 0) + // Standard Error: 343 + .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875` - // Estimated: `9291` - // Minimum execution time: 269_002_000 picoseconds. - Weight::from_parts(280_531_070, 9291) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_405_000 picoseconds. + Weight::from_parts(2_264_024, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (8 ±0)` - // Estimated: `9283 + r * (8 ±0)` - // Minimum execution time: 259_660_000 picoseconds. - Weight::from_parts(287_625_483, 9283) - // Standard Error: 524 - .saturating_add(Weight::from_parts(456_200, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_215_000 picoseconds. 
+ Weight::from_parts(10_505_632, 0) + // Standard Error: 240 + .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875` - // Estimated: `9289` - // Minimum execution time: 263_305_000 picoseconds. - Weight::from_parts(278_483_877, 9289) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 10_440_000 picoseconds. + Weight::from_parts(2_575_889, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 125697]`. 
fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1000 + n * (1 ±0)` - // Estimated: `9412 + n * (1 ±0)` - // Minimum execution time: 329_511_000 picoseconds. - Weight::from_parts(341_570_880, 9412) - // Standard Error: 11 - .saturating_add(Weight::from_parts(5_914, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 55_119_000 picoseconds. + Weight::from_parts(56_732_248, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `808 + r * (112 ±0)` - // Estimated: `9226 + r * (112 ±0)` - // Minimum execution time: 269_103_000 picoseconds. - Weight::from_parts(317_360_842, 9226) - // Standard Error: 16_463 - .saturating_add(Weight::from_parts(45_965_726, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_176_000 picoseconds. 
+ Weight::from_parts(9_861_102, 0) + // Standard Error: 6_029 + .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `910 + r * (76 ±0)` - // Estimated: `9279 + r * (77 ±0)` - // Minimum execution time: 281_685_000 picoseconds. - Weight::from_parts(339_617_056, 9279) - // Standard Error: 15_672 - .saturating_add(Weight::from_parts(45_907_026, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(28_785_765, 0) + // Standard Error: 9_160 + .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 160]`. 
fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + r * (42 ±0)` - // Estimated: `9294 + r * (42 ±0)` - // Minimum execution time: 265_009_000 picoseconds. - Weight::from_parts(304_895_744, 9294) - // Standard Error: 7_640 - .saturating_add(Weight::from_parts(12_117_411, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_206_000 picoseconds. + Weight::from_parts(12_420_664, 0) + // Standard Error: 3_489 + .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1536 w:0) + /// Storage: `Contracts::PristineCode` (r:1535 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1538 w:1538) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1537 w:1537) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + r * (965 ±0)` - // Estimated: `9285 + r * (3090 ±7)` - // Minimum execution time: 281_110_000 picoseconds. - Weight::from_parts(283_554_000, 9285) - // Standard Error: 47_136 - .saturating_add(Weight::from_parts(27_448_052, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `0 + r * (926 ±0)` + // Estimated: `8969 + r * (3047 ±7)` + // Minimum execution time: 9_219_000 picoseconds. 
+ Weight::from_parts(9_385_000, 8969) + // Standard Error: 45_562 + .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 3090).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (131 ±0)` - // Estimated: `9346 + r * (2607 ±0)` - // Minimum execution time: 274_312_000 picoseconds. - Weight::from_parts(297_853_480, 9346) - // Standard Error: 31_172 - .saturating_add(Weight::from_parts(6_829_169, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `274 + r * (78 ±0)` + // Estimated: `1265 + r * (2553 ±0)` + // Minimum execution time: 9_355_000 picoseconds. 
+ Weight::from_parts(15_071_309, 1265) + // Standard Error: 9_722 + .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) + /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `MaxEncodedLen`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `972 + r * (184 ±0)` - // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 271_607_000 picoseconds. - Weight::from_parts(299_008_266, 129453) - // Standard Error: 25_085 - .saturating_add(Weight::from_parts(5_828_791, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `275 + r * (78 ±0)` + // Estimated: `990 + r * (2568 ±0)` + // Minimum execution time: 8_979_000 picoseconds. + Weight::from_parts(14_362_224, 990) + // Standard Error: 9_137 + .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) } @@ -4112,82 +2656,45 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `861 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 275_135_000 picoseconds. - Weight::from_parts(289_363_447, 9282) - // Standard Error: 501 - .saturating_add(Weight::from_parts(171_024, 0).saturating_mul(r.into())) + // Minimum execution time: 269_704_000 picoseconds. 
+ Weight::from_parts(289_916_035, 9282) + // Standard Error: 408 + .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2112 + r * (39 ±0)` - // Estimated: `10377 + r * (40 ±0)` - // Minimum execution time: 279_752_000 picoseconds. - Weight::from_parts(322_774_890, 10377) - // Standard Error: 867 - .saturating_add(Weight::from_parts(262_474, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_361_000 picoseconds. 
+ Weight::from_parts(11_633_836, 0) + // Standard Error: 86 + .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) + /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (3 ±0)` - // Estimated: `9279 + r * (3 ±0)` - // Minimum execution time: 263_398_000 picoseconds. - Weight::from_parts(291_372_000, 9279) - // Standard Error: 448 - .saturating_add(Weight::from_parts(151_931, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + // Measured: `219` + // Estimated: `1704` + // Minimum execution time: 9_133_000 picoseconds. + Weight::from_parts(13_259_836, 1704) + // Standard Error: 121 + .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. fn instr_i64_load_store(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 838_000 picoseconds. - Weight::from_parts(670_057, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(15_037, 0).saturating_mul(r.into())) + // Minimum execution time: 851_000 picoseconds. + Weight::from_parts(587_883, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index 7b6edb37b7f7..e2b40974579e 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -103,6 +103,24 @@ impl LeafDataProvider for ParentNumberAndHash { } } +/// Block hash provider for a given block number. +pub trait BlockHashProvider { + fn block_hash(block_number: BlockNumber) -> BlockHash; +} + +/// Default implementation of BlockHashProvider using frame_system. 
+pub struct DefaultBlockHashProvider<T: frame_system::Config> {
+	_phantom: sp_std::marker::PhantomData<T>,
+}
+
+impl<T: frame_system::Config> BlockHashProvider<BlockNumberFor<T>, T::Hash>
+	for DefaultBlockHashProvider<T>
+{
+	fn block_hash(block_number: BlockNumberFor<T>) -> T::Hash {
+		frame_system::Pallet::<T>::block_hash(block_number)
+	}
+}
+
 pub trait WeightInfo {
 	fn on_initialize(peaks: NodeIndex) -> Weight;
 }
@@ -177,6 +195,12 @@ pub mod pallet {
 		/// Clients. Hook complexity should be `O(1)`.
 		type OnNewRoot: primitives::OnNewRoot<HashOf<Self, I>>;
 
+		/// Block hash provider for a given block number.
+		type BlockHashProvider: BlockHashProvider<
+			BlockNumberFor<Self>,
+			<Self as frame_system::Config>::Hash,
+		>;
+
 		/// Weights for this pallet.
 		type WeightInfo: WeightInfo;
 	}
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
index 96a20c3445ee..f2acc35a137f 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
@@ -29,7 +29,7 @@ use sp_std::prelude::*;
 use crate::{
 	mmr::{Node, NodeOf},
 	primitives::{self, NodeIndex},
-	Config, Nodes, NumberOfLeaves, Pallet,
+	BlockHashProvider, Config, Nodes, NumberOfLeaves, Pallet,
 };
 
 /// A marker type for runtime-specific storage implementation.
@@ -87,7 +87,7 @@ where
 		// Fall through to searching node using fork-specific key.
 		let ancestor_parent_block_num =
 			Pallet::<T, I>::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves);
-		let ancestor_parent_hash = <frame_system::Pallet<T>>::block_hash(ancestor_parent_block_num);
+		let ancestor_parent_hash = T::BlockHashProvider::block_hash(ancestor_parent_block_num);
 		let temp_key = Pallet::<T, I>::node_temp_offchain_key(pos, ancestor_parent_hash);
 		debug!(
 			target: "runtime::mmr::offchain",
diff --git a/substrate/frame/merkle-mountain-range/src/mock.rs b/substrate/frame/merkle-mountain-range/src/mock.rs
index 212012a052a0..8318b20e8307 100644
--- a/substrate/frame/merkle-mountain-range/src/mock.rs
+++ b/substrate/frame/merkle-mountain-range/src/mock.rs
@@ -44,6 +44,7 @@ impl Config for Test {
 	type Hashing = Keccak256;
 	type LeafData = Compact<Keccak256, (ParentNumberAndHash<Test>, LeafData)>;
 	type OnNewRoot = ();
+	type BlockHashProvider = DefaultBlockHashProvider<Test>;
 	type WeightInfo = ();
 }
diff --git a/substrate/frame/parameters/src/tests/mock.rs b/substrate/frame/parameters/src/tests/mock.rs
index 4c7dda639a9a..6cfd7c8f30b8 100644
--- a/substrate/frame/parameters/src/tests/mock.rs
+++ b/substrate/frame/parameters/src/tests/mock.rs
@@ -16,6 +16,7 @@
 // limitations under the License.
 
 #![cfg(any(test, feature = "runtime-benchmarks"))]
+#![allow(non_snake_case)]
 
 //! Mock runtime that configures the `pallet_example_basic` to use dynamic params for testing.
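The new `BlockHashProvider` item lets a runtime choose where the MMR off-chain code looks up block hashes; `DefaultBlockHashProvider` simply defers to `frame_system`. As a rough illustration (hypothetical type names, assumed setup, not part of this changeset), a custom provider only has to implement one function:

use sp_core::H256;

// Illustrative only: serve deterministic dummy hashes, e.g. for tests.
pub struct StaticBlockHashes;

impl pallet_mmr::BlockHashProvider<u64, H256> for StaticBlockHashes {
	fn block_hash(block_number: u64) -> H256 {
		// Derive a dummy hash directly from the block number.
		H256::from_low_u64_be(block_number)
	}
}

A runtime under this sketch would then set `type BlockHashProvider = StaticBlockHashes;` in its MMR config instead of `DefaultBlockHashProvider<Runtime>`.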
@@ -66,6 +67,20 @@ pub mod dynamic_params {
 		#[codec(index = 0)]
 		pub static Key3: u128 = 4;
 	}
+
+	#[dynamic_pallet_params]
+	#[codec(index = 2)]
+	pub mod nis {
+		#[codec(index = 0)]
+		pub static Target: u64 = 0;
+	}
+
+	#[dynamic_pallet_params]
+	#[codec(index = 3)]
+	pub mod somE_weird_SPElLInG_s {
+		#[codec(index = 0)]
+		pub static V: u64 = 0;
+	}
 }
 
 #[docify::export(benchmarking_default)]
@@ -98,6 +113,8 @@ mod custom_origin {
 		}
 
 		match key {
+			RuntimeParametersKey::SomEWeirdSPElLInGS(_) |
+			RuntimeParametersKey::Nis(_) |
 			RuntimeParametersKey::Pallet1(_) => ensure_root(origin.clone()),
 			RuntimeParametersKey::Pallet2(_) => ensure_signed(origin.clone()).map(|_| ()),
 		}
diff --git a/substrate/frame/support/procedural/src/dynamic_params.rs b/substrate/frame/support/procedural/src/dynamic_params.rs
index 29399a885bc6..ad62f59e6b0a 100644
--- a/substrate/frame/support/procedural/src/dynamic_params.rs
+++ b/substrate/frame/support/procedural/src/dynamic_params.rs
@@ -91,7 +91,7 @@ impl ToTokens for DynamicParamModAttr {
 		let mut quoted_enum = quote! {};
 		for m in self.inner_mods() {
 			let aggregate_name =
-				syn::Ident::new(&m.ident.to_string().to_class_case(), m.ident.span());
+				syn::Ident::new(&m.ident.to_string().to_pascal_case(), m.ident.span());
 			let mod_name = &m.ident;
 
 			let mut attrs = m.attrs.clone();
@@ -222,8 +222,10 @@ impl ToTokens for DynamicPalletParamAttr {
 		let (params_mod, parameter_pallet, runtime_params) =
 			(&self.inner_mod, &self.meta.parameter_pallet, &self.meta.runtime_params);
 
-		let aggregate_name =
-			syn::Ident::new(&params_mod.ident.to_string().to_class_case(), params_mod.ident.span());
+		let aggregate_name = syn::Ident::new(
+			&params_mod.ident.to_string().to_pascal_case(),
+			params_mod.ident.span(),
+		);
 		let (mod_name, vis) = (&params_mod.ident, &params_mod.vis);
 		let statics = self.statics();
 
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs
index 66777cef7b8e..a423656c394f 100644
--- a/substrate/frame/support/src/traits.rs
+++ b/substrate/frame/support/src/traits.rs
@@ -36,7 +36,7 @@ mod members;
 pub use members::{AllowAll, DenyAll, Filter};
 pub use members::{
 	AsContains, ChangeMembers, Contains, ContainsLengthBound, ContainsPair, Equals, Everything,
-	EverythingBut, FromContainsPair, InitializeMembers, InsideBoth, IsInVec, Nothing,
+	EverythingBut, FromContains, FromContainsPair, InitializeMembers, InsideBoth, IsInVec, Nothing,
 	RankedMembers, RankedMembersSwapHandler, SortedMembers, TheseExcept,
 };
 
diff --git a/substrate/frame/support/src/traits/members.rs b/substrate/frame/support/src/traits/members.rs
index 3a6e3719593a..53de84ab2245 100644
--- a/substrate/frame/support/src/traits/members.rs
+++ b/substrate/frame/support/src/traits/members.rs
@@ -66,6 +66,15 @@ impl<A, B, CP: ContainsPair<A, B>> Contains<(A, B)> for FromContainsPair<CP> {
 	}
 }
 
+/// A [`ContainsPair`] implementation that has a `Contains` implementation for each member of the
+/// pair.
+pub struct FromContains<CA, CB>(PhantomData<(CA, CB)>);
+impl<A, B, CA: Contains<A>, CB: Contains<B>> ContainsPair<A, B> for FromContains<CA, CB> {
+	fn contains(a: &A, b: &B) -> bool {
+		CA::contains(a) && CB::contains(b)
+	}
+}
+
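`FromContains` is the `ContainsPair` counterpart of `FromContainsPair`: it combines two independent `Contains` filters into one pair filter. A small usage sketch (assumed, not part of this changeset):

use frame_support::traits::{ContainsPair, Everything, FromContains, Nothing};

// Accept any left value, reject every right value.
type LeftOnly = FromContains<Everything, Nothing>;

fn demo() {
	// `Everything` contains 1u32, but `Nothing` does not contain 2u32,
	// so the pair as a whole is rejected.
	assert!(!<LeftOnly as ContainsPair<u32, u32>>::contains(&1, &2));
}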
pub enum Everything {} impl Contains for Everything { diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs index 3635311e6435..8842b2058018 100644 --- a/substrate/frame/support/src/traits/tokens.rs +++ b/substrate/frame/support/src/traits/tokens.rs @@ -31,7 +31,7 @@ pub mod pay; pub use misc::{ AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance, ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision, - Preservation, Provenance, Restriction, UnityAssetBalanceConversion, WithdrawConsequence, - WithdrawReasons, + Preservation, Provenance, Restriction, UnityAssetBalanceConversion, UnityOrOuterConversion, + WithdrawConsequence, WithdrawReasons, }; pub use pay::{Pay, PayFromAccount, PaymentStatus}; diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index 575b771a6144..db44b2f43a4e 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -925,3 +925,28 @@ impl< } } } + +impl< + Left: fungible::Inspect, + Right: fungibles::Inspect + fungibles::Refund, + Criterion: Convert>::AssetId>>, + AssetKind: AssetId, + AccountId, + > fungibles::Refund for UnionOf +{ + type AssetId = AssetKind; + type Balance = >::Balance; + + fn deposit_held(asset: AssetKind, who: AccountId) -> Option<(AccountId, Self::Balance)> { + match Criterion::convert(asset) { + Left(()) => None, + Right(a) => >::deposit_held(a, who), + } + } + fn refund(asset: AssetKind, who: AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(()) => Err(DispatchError::Unavailable), + Right(a) => >::refund(a, who), + } + } +} diff --git a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs index 49f6c846ccdd..7f882c1fc4fe 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs @@ -15,13 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for creating and destroying assets. +//! Traits for creating, editing and destroying assets. //! //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. -use sp_runtime::{DispatchError, DispatchResult}; - use super::Inspect; +use crate::traits::tokens::{AssetId, Balance}; +use sp_runtime::{DispatchError, DispatchResult}; /// Trait for providing the ability to create new fungible assets. pub trait Create: Inspect { @@ -34,6 +34,22 @@ pub trait Create: Inspect { ) -> DispatchResult; } +/// Trait for refunding the existence deposit of a target asset account. +/// +/// The existence deposit might by necessary and present in cases where the asset held by the +/// account is insufficient for the required storage, or when the system cannot provide a consumer +/// reference for any reason. +pub trait Refund { + /// Means of identifying one asset class from another. + type AssetId: AssetId; + /// Scalar type for representing deposit balance of an account. + type Balance: Balance; + /// Returns the amount of account deposit and depositor address, if any. + fn deposit_held(id: Self::AssetId, who: AccountId) -> Option<(AccountId, Self::Balance)>; + /// Return the deposit (if any) of a target asset account. 
+ fn refund(id: Self::AssetId, who: AccountId) -> DispatchResult; +} + /// Trait for providing the ability to destroy existing fungible assets. pub trait Destroy: Inspect { /// Start the destruction an existing fungible asset. diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 2122fdeaed3f..8b4ea4d13cf9 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -44,7 +44,7 @@ pub use hold::{ Unbalanced as UnbalancedHold, }; pub use imbalance::{Credit, Debt, HandleImbalanceDrop, Imbalance}; -pub use lifetime::{Create, Destroy}; +pub use lifetime::{Create, Destroy, Refund}; pub use regular::{ Balanced, DecreaseIssuance, Dust, IncreaseIssuance, Inspect, Mutate, Unbalanced, }; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs index 4f95ad8368c2..3e68d6b94518 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs @@ -19,6 +19,8 @@ //! //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. +use sp_runtime::DispatchResult; + pub trait Inspect: super::Inspect { // Get owner for an AssetId. fn owner(asset: Self::AssetId) -> Option; @@ -29,3 +31,22 @@ pub trait Inspect: super::Inspect { // Get freezer for an AssetId. fn freezer(asset: Self::AssetId) -> Option; } + +/// Trait for resetting the team configuration of an existing fungible asset. +pub trait ResetTeam: super::Inspect { + /// Reset the team for the asset with the given `id`. + /// + /// ### Parameters + /// - `id`: The identifier of the asset for which the team is being reset. + /// - `owner`: The new `owner` account for the asset. + /// - `admin`: The new `admin` account for the asset. + /// - `issuer`: The new `issuer` account for the asset. + /// - `freezer`: The new `freezer` account for the asset. 
+ fn reset_team( + id: Self::AssetId, + owner: AccountId, + admin: AccountId, + issuer: AccountId, + freezer: AccountId, + ) -> DispatchResult; +} diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index c8a1ec37e781..2c7d4bab7baa 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -904,3 +904,35 @@ impl< } } } + +impl< + Left: fungibles::Inspect + fungibles::Refund, + Right: fungibles::Inspect + + fungibles::Refund>::Balance>, + Criterion: Convert< + AssetKind, + Either< + >::AssetId, + >::AssetId, + >, + >, + AssetKind: AssetId, + AccountId, + > fungibles::Refund for UnionOf +{ + type AssetId = AssetKind; + type Balance = >::Balance; + + fn deposit_held(asset: AssetKind, who: AccountId) -> Option<(AccountId, Self::Balance)> { + match Criterion::convert(asset) { + Left(a) => >::deposit_held(a, who), + Right(a) => >::deposit_held(a, who), + } + } + fn refund(asset: AssetKind, who: AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::refund(a, who), + Right(a) => >::refund(a, who), + } + } +} diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index a4dd5e491428..424acb1d550b 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -17,6 +17,7 @@ //! Miscellaneous types. +use crate::traits::Contains; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; @@ -299,6 +300,33 @@ where fn ensure_successful(_: AssetId) {} } +/// Implements [`ConversionFromAssetBalance`], allowing for a 1:1 balance conversion of the asset +/// when it meets the conditions specified by `C`. If the conditions are not met, the conversion is +/// delegated to `O`. +pub struct UnityOrOuterConversion(core::marker::PhantomData<(C, O)>); +impl + ConversionFromAssetBalance for UnityOrOuterConversion +where + C: Contains, + O: ConversionFromAssetBalance, + AssetBalance: Into, +{ + type Error = O::Error; + fn from_asset_balance( + balance: AssetBalance, + asset_id: AssetId, + ) -> Result { + if C::contains(&asset_id) { + return Ok(balance.into()); + } + O::from_asset_balance(balance, asset_id) + } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(asset_id: AssetId) { + O::ensure_successful(asset_id) + } +} + /// Trait to handle NFT locking mechanism to ensure interactions with the asset can be implemented /// downstream to extend logic of Uniques/Nfts current functionality. pub trait Locker { diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index d61558cf5363..9a2b22b81709 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -244,6 +244,11 @@ ord_parameter_types! 
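A minimal sketch (illustrative only, not part of this diff) of how the new `UnityOrOuterConversion` adapter composes: asset ids matched by the `Contains` filter are converted 1:1 via `Into`, every other id is delegated to the outer converter. The concrete types (`NativeAsset`, `FallbackConverter`, and the `u32`/`u64`/`u128` parameters) are made-up placeholders.

use frame_support::traits::{
	tokens::{ConversionFromAssetBalance, UnityOrOuterConversion},
	Contains,
};

/// Hypothetical filter matching the asset ids that are already denominated 1:1
/// in the target balance (here: asset id 0).
pub struct NativeAsset;
impl Contains<u32> for NativeAsset {
	fn contains(id: &u32) -> bool {
		*id == 0
	}
}

/// Hypothetical fallback converter used for every other asset.
pub struct FallbackConverter;
impl ConversionFromAssetBalance<u64, u32, u128> for FallbackConverter {
	type Error = ();
	fn from_asset_balance(balance: u64, _id: u32) -> Result<u128, Self::Error> {
		Ok(u128::from(balance) * 2) // stand-in for a real price/pool based conversion
	}
	#[cfg(feature = "runtime-benchmarks")]
	fn ensure_successful(_: u32) {}
}

// Asset 0 passes through unchanged (1:1); all other ids go through `FallbackConverter`.
type Converter = UnityOrOuterConversion<NativeAsset, FallbackConverter>;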
{ pub const AssetConversionOrigin: u64 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); } +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (NativeOrWithId, NativeOrWithId), +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -252,8 +257,8 @@ impl pallet_asset_conversion::Config for Runtime { type Assets = UnionOf, AccountId>; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = Chain< - WithFirstAsset>, - Ascending>, + WithFirstAsset, PoolIdToAccountId>, + Ascending, PoolIdToAccountId>, >; type PoolAssetId = u32; type PoolAssets = PoolAssets; diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 544ba72141eb..2f553819b1bc 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -68,4 +68,4 @@ std = [ disable-logging = ["log/max_level_off"] # Do not report the documentation in the metadata. no-metadata-docs = ["sp-api-proc-macro/no-metadata-docs"] -frame-metadata = ["sp-api-proc-macro/frame-metadata", "sp-metadata-ir"] +frame-metadata = ["sp-metadata-ir"] diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index f5406758687a..b1bc547f3e4a 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -35,4 +35,3 @@ assert_matches = "1.3.0" default = ["std"] std = ["blake2/std"] no-metadata-docs = [] -frame-metadata = [] diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index e34e4c0e7672..cb213f2fd627 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -193,10 +193,7 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); - #[cfg(feature = "frame-metadata")] let metadata = crate::runtime_metadata::generate_decl_runtime_metadata(&decl); - #[cfg(not(feature = "frame-metadata"))] - let metadata = quote!(); let trait_api_version = get_api_version(&found_attributes)?; diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 87a381fd7bf9..2c423f8c28dd 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -821,10 +821,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let wasm_interface = generate_wasm_interface(api_impls)?; let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; - #[cfg(feature = "frame-metadata")] let runtime_metadata = crate::runtime_metadata::generate_impl_runtime_metadata(api_impls)?; - #[cfg(not(feature = "frame-metadata"))] - let runtime_metadata = quote!(); let impl_ = quote!( #base_runtime_api diff --git a/substrate/primitives/api/proc-macro/src/lib.rs b/substrate/primitives/api/proc-macro/src/lib.rs index 06e148880e97..d34f4b7f9cf6 100644 --- a/substrate/primitives/api/proc-macro/src/lib.rs +++ b/substrate/primitives/api/proc-macro/src/lib.rs @@ -25,7 +25,6 @@ mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; -#[cfg(feature = "frame-metadata")] 
mod runtime_metadata; mod utils; diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 41849401291e..9944927d5573 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -164,15 +164,17 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { let (impl_generics, _, where_clause) = generics.split_for_impl(); quote!( - #( #attrs )* - #[inline(always)] - pub fn runtime_metadata #impl_generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR - #where_clause - { - #crate_::metadata_ir::RuntimeApiMetadataIR { - name: #trait_name, - methods: #crate_::vec![ #( #methods, )* ], - docs: #docs, + #crate_::frame_metadata_enabled! { + #( #attrs )* + #[inline(always)] + pub fn runtime_metadata #impl_generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR + #where_clause + { + #crate_::metadata_ir::RuntimeApiMetadataIR { + name: #trait_name, + methods: #crate_::vec![ #( #methods, )* ], + docs: #docs, + } } } ) @@ -255,14 +257,16 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { - #crate_::vec![ #( #metadata, )* ] + #crate_::frame_metadata_enabled! { + #[doc(hidden)] + trait InternalImplRuntimeApis { + #[inline(always)] + fn runtime_metadata(&self) -> #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { + #crate_::vec![ #( #metadata, )* ] + } } + #[doc(hidden)] + impl InternalImplRuntimeApis for #runtime_name {} } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} )) } diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index a6570a98f1f7..d90b56058648 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -261,7 +261,6 @@ pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { } /// Extract the documentation from the provided attributes. -#[cfg(feature = "frame-metadata")] pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { use quote::ToTokens; attrs @@ -277,7 +276,6 @@ pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { } /// Filters all attributes except the cfg ones. -#[cfg(feature = "frame-metadata")] pub fn filter_cfg_attributes(attrs: &[syn::Attribute]) -> Vec { attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect() } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index a945b9f21f3c..20f989c4882e 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -838,3 +838,4 @@ decl_runtime_apis! 
{ sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); sp_core::generate_feature_enabled_macro!(std_disabled, not(feature = "std"), $); +sp_core::generate_feature_enabled_macro!(frame_metadata_enabled, feature = "frame-metadata", $); diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml deleted file mode 100644 index 618cb645475d..000000000000 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "try-runtime-cli" -version = "0.38.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -description = "Cli command runtime testing and dry-running" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -remote-externalities = { package = "frame-remote-externalities", path = "../../remote-externalities" } -sc-cli = { path = "../../../../client/cli" } -sc-executor = { path = "../../../../client/executor" } -sp-consensus-aura = { path = "../../../../primitives/consensus/aura" } -sp-consensus-babe = { path = "../../../../primitives/consensus/babe" } -sp-core = { path = "../../../../primitives/core" } -sp-externalities = { path = "../../../../primitives/externalities" } -sp-inherents = { path = "../../../../primitives/inherents" } -sp-io = { path = "../../../../primitives/io" } -sp-keystore = { path = "../../../../primitives/keystore" } -sp-runtime = { path = "../../../../primitives/runtime" } -sp-rpc = { path = "../../../../primitives/rpc" } -sp-state-machine = { path = "../../../../primitives/state-machine" } -sp-timestamp = { path = "../../../../primitives/timestamp" } -sp-transaction-storage-proof = { path = "../../../../primitives/transaction-storage-proof" } -sp-version = { path = "../../../../primitives/version" } -sp-debug-derive = { path = "../../../../primitives/debug-derive" } -sp-api = { path = "../../../../primitives/api" } -sp-weights = { path = "../../../../primitives/weights" } -frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } -substrate-rpc-client = { path = "../../rpc/client" } - -async-trait = "0.1.79" -clap = { version = "4.5.3", features = ["derive"] } -hex = { version = "0.4.3", default-features = false } -log = { workspace = true, default-features = true } -parity-scale-codec = "3.6.1" -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -zstd = { version = "0.12.4", default-features = false } - -[dev-dependencies] -assert_cmd = "2.0.10" -node-primitives = { path = "../../../../bin/node/primitives" } -regex = "1.7.3" -substrate-cli-test-utils = { path = "../../../../test-utils/cli" } -tempfile = "3.1.0" -tokio = "1.37" - -[features] -try-runtime = [ - "frame-try-runtime/try-runtime", - "sp-debug-derive/force-debug", - "sp-runtime/try-runtime", - "substrate-cli-test-utils/try-runtime", -] diff --git a/substrate/utils/frame/try-runtime/cli/src/block_building_info.rs b/substrate/utils/frame/try-runtime/cli/src/block_building_info.rs deleted file mode 100644 index db24d06ef0a1..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/block_building_info.rs +++ /dev/null @@ -1,152 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::BlockT; -use parity_scale_codec::Encode; -use sc_cli::Result; -use sp_consensus_aura::{Slot, SlotDuration, AURA_ENGINE_ID}; -use sp_consensus_babe::{ - digests::{PreDigest, SecondaryPlainPreDigest}, - BABE_ENGINE_ID, -}; -use sp_inherents::{InherentData, InherentDataProvider}; -use sp_runtime::{Digest, DigestItem}; -use sp_timestamp::TimestampInherentData; - -/// Something that can create inherent data providers and pre-runtime digest. -/// -/// It is possible for the caller to provide custom arguments to the callee by setting the -/// `ExtraArgs` generic parameter. -/// -/// This module already provides some convenience implementation of this trait for closures. So, it -/// should not be required to implement it directly. -#[async_trait::async_trait] -pub trait BlockBuildingInfoProvider { - type InherentDataProviders: InherentDataProvider; - - async fn get_inherent_providers_and_pre_digest( - &self, - parent_hash: Block::Hash, - extra_args: ExtraArgs, - ) -> Result<(Self::InherentDataProviders, Vec)>; -} - -#[async_trait::async_trait] -impl BlockBuildingInfoProvider for F -where - Block: BlockT, - F: Fn(Block::Hash, ExtraArgs) -> Fut + Sync + Send, - Fut: std::future::Future)>> + Send + 'static, - IDP: InherentDataProvider + 'static, - ExtraArgs: Send + 'static, -{ - type InherentDataProviders = IDP; - - async fn get_inherent_providers_and_pre_digest( - &self, - parent: Block::Hash, - extra_args: ExtraArgs, - ) -> Result<(Self::InherentDataProviders, Vec)> { - (*self)(parent, extra_args).await - } -} - -/// Provides [`BlockBuildingInfoProvider`] implementation for chains that include timestamp inherent -/// and use Aura for a block production. -/// -/// It depends only on the expected block production frequency, i.e. `blocktime_millis`. -pub fn timestamp_with_aura_info( - blocktime_millis: u64, -) -> impl BlockBuildingInfoProvider> { - move |_, maybe_prev_info: Option<(InherentData, Digest)>| async move { - let timestamp_idp = match maybe_prev_info { - Some((inherent_data, _)) => sp_timestamp::InherentDataProvider::new( - inherent_data.timestamp_inherent_data().unwrap().unwrap() + blocktime_millis, - ), - None => sp_timestamp::InherentDataProvider::from_system_time(), - }; - - let slot = - Slot::from_timestamp(*timestamp_idp, SlotDuration::from_millis(blocktime_millis)); - let digest = vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())]; - - Ok((timestamp_idp, digest)) - } -} - -/// Provides [`BlockBuildingInfoProvider`] implementation for chains that include timestamp inherent -/// and use Babe for a block production. -/// -/// It depends only on the expected block production frequency, i.e. `blocktime_millis`. 
-pub fn timestamp_with_babe_info( - blocktime_millis: u64, -) -> impl BlockBuildingInfoProvider> { - move |_, maybe_prev_info: Option<(InherentData, Digest)>| async move { - let timestamp_idp = match maybe_prev_info { - Some((inherent_data, _)) => sp_timestamp::InherentDataProvider::new( - inherent_data.timestamp_inherent_data().unwrap().unwrap() + blocktime_millis, - ), - None => sp_timestamp::InherentDataProvider::from_system_time(), - }; - - let slot = - Slot::from_timestamp(*timestamp_idp, SlotDuration::from_millis(blocktime_millis)); - let slot_idp = sp_consensus_babe::inherents::InherentDataProvider::new(slot); - - let digest = vec![DigestItem::PreRuntime( - BABE_ENGINE_ID, - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0 }) - .encode(), - )]; - - Ok(((slot_idp, timestamp_idp), digest)) - } -} - -/// Provides [`BlockBuildingInfoProvider`] implementation for chains that use: -/// - timestamp inherent, -/// - Babe for a block production (inherent + digest), -/// - uncles inherent, -/// - storage proof inherent -/// -/// It depends only on the expected block production frequency, i.e. `blocktime_millis`. -pub fn substrate_info( - blocktime_millis: u64, -) -> impl BlockBuildingInfoProvider> { - move |_, maybe_prev_info: Option<(InherentData, Digest)>| async move { - let timestamp_idp = match maybe_prev_info { - Some((inherent_data, _)) => sp_timestamp::InherentDataProvider::new( - inherent_data.timestamp_inherent_data().unwrap().unwrap() + blocktime_millis, - ), - None => sp_timestamp::InherentDataProvider::from_system_time(), - }; - - let slot = - Slot::from_timestamp(*timestamp_idp, SlotDuration::from_millis(blocktime_millis)); - let slot_idp = sp_consensus_babe::inherents::InherentDataProvider::new(slot); - - let storage_proof_idp = sp_transaction_storage_proof::InherentDataProvider::new(None); - - let digest = vec![DigestItem::PreRuntime( - BABE_ENGINE_ID, - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0 }) - .encode(), - )]; - - Ok(((slot_idp, timestamp_idp, storage_proof_idp), digest)) - } -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs b/substrate/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs deleted file mode 100644 index 102336d64421..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/create_snapshot.rs +++ /dev/null @@ -1,78 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{build_executor, LiveState, SharedParams, State, LOG_TARGET}; -use sc_executor::sp_wasm_interface::HostFunctions; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, StateApi}; - -/// Configurations of the [`crate::Command::CreateSnapshot`]. -#[derive(Debug, Clone, clap::Parser)] -pub struct CreateSnapshotCmd { - /// The source of the snapshot. 
Must be a remote node. - #[clap(flatten)] - pub from: LiveState, - - /// The snapshot path to write to. - /// - /// If not provided `-@.snap` will be used. - pub snapshot_path: Option, -} - -/// inner command for `Command::CreateSnapshot`. -pub(crate) async fn create_snapshot( - shared: SharedParams, - command: CreateSnapshotCmd, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Hash: serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - HostFns: HostFunctions, -{ - let snapshot_path = command.snapshot_path; - if !matches!(shared.runtime, crate::Runtime::Existing) { - return Err("creating a snapshot is only possible with --runtime existing.".into()) - } - - let path = match snapshot_path { - Some(path) => path, - None => { - let rpc = ws_client(&command.from.uri).await.unwrap(); - let remote_spec = StateApi::::runtime_version(&rpc, None).await.unwrap(); - let path_str = format!( - "{}-{}@{}.snap", - remote_spec.spec_name.to_lowercase(), - remote_spec.spec_version, - command.from.at.clone().unwrap_or("latest".to_owned()) - ); - log::info!(target: LOG_TARGET, "snapshot path not provided (-s), using '{}'", path_str); - path_str.into() - }, - }; - - let executor = build_executor::(&shared); - let _ = State::Live(command.from) - .into_ext::(&shared, &executor, Some(path.into()), false) - .await?; - - Ok(()) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/substrate/utils/frame/try-runtime/cli/src/commands/execute_block.rs deleted file mode 100644 index 1f1b6ec7d9b9..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ /dev/null @@ -1,170 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - build_executor, full_extensions, rpc_err_handler, state_machine_call_with_proof, LiveState, - SharedParams, State, LOG_TARGET, -}; -use parity_scale_codec::Encode; -use sc_executor::sp_wasm_interface::HostFunctions; -use sp_rpc::{list::ListOrValue, number::NumberOrHex}; -use sp_runtime::{ - generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; -use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi}; - -/// Configurations of the [`crate::Command::ExecuteBlock`]. -/// -/// This will always call into `TryRuntime_execute_block`, which can optionally skip the state-root -/// check (useful for trying a unreleased runtime), and can execute runtime sanity checks as well. -#[derive(Debug, Clone, clap::Parser)] -pub struct ExecuteBlockCmd { - /// Which try-state targets to execute when running this command. - /// - /// Expected values: - /// - `all` - /// - `none` - /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. - /// `Staking, System`). 
- /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a - /// round-robin fashion. - #[arg(long, default_value = "all")] - pub try_state: frame_try_runtime::TryStateSelect, - - /// The ws uri from which to fetch the block. - /// - /// This will always fetch the next block of whatever `state` is referring to, because this is - /// the only sensible combination. In other words, if you have the state of block `n`, you - /// should execute block `n+1` on top of it. - /// - /// If `state` is `Live`, this can be ignored and the same uri is used for both. - #[arg( - long, - value_parser = crate::parse::url - )] - pub block_ws_uri: Option, - - /// The state type to use. - #[command(subcommand)] - pub state: State, -} - -impl ExecuteBlockCmd { - fn block_ws_uri(&self) -> String - where - ::Err: Debug, - { - match (&self.block_ws_uri, &self.state) { - (Some(block_ws_uri), State::Snap { .. }) => block_ws_uri.to_owned(), - (Some(block_ws_uri), State::Live { .. }) => { - log::error!(target: LOG_TARGET, "--block-uri is provided while state type is live, Are you sure you know what you are doing?"); - block_ws_uri.to_owned() - }, - (None, State::Live(LiveState { uri, .. })) => uri.clone(), - (None, State::Snap { .. }) => { - panic!("either `--block-uri` must be provided, or state must be `live`"); - }, - } - } -} - -pub(crate) async fn execute_block( - shared: SharedParams, - command: ExecuteBlockCmd, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - ::Err: Debug, - Block::Hash: serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, - as TryInto>::Error: Debug, - HostFns: HostFunctions, -{ - let executor = build_executor::(&shared); - let ext = command.state.into_ext::(&shared, &executor, None, true).await?; - - // get the block number associated with this block. - let block_ws_uri = command.block_ws_uri::(); - let rpc = ws_client(&block_ws_uri).await?; - let next_hash = next_hash_of::(&rpc, ext.block_hash).await?; - - log::info!(target: LOG_TARGET, "fetching next block: {:?} ", next_hash); - - let block = ChainApi::<(), Block::Hash, Block::Header, SignedBlock>::block( - &rpc, - Some(next_hash), - ) - .await - .map_err(rpc_err_handler)? - .expect("header exists, block should also exist; qed") - .block; - - // A digest item gets added when the runtime is processing the block, so we need to pop - // the last one to be consistent with what a gossiped block would contain. - let (mut header, extrinsics) = block.deconstruct(); - header.digest_mut().pop(); - let block = Block::new(header, extrinsics); - - // for now, hardcoded for the sake of simplicity. We might customize them one day. 
- let state_root_check = false; - let signature_check = false; - let payload = (block.clone(), state_root_check, signature_check, command.try_state).encode(); - - let _ = state_machine_call_with_proof::( - &ext, - &executor, - "TryRuntime_execute_block", - &payload, - full_extensions(executor.clone()), - shared.export_proof, - )?; - - Ok(()) -} - -pub(crate) async fn next_hash_of( - rpc: &substrate_rpc_client::WsClient, - hash: Block::Hash, -) -> sc_cli::Result -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, -{ - let number = ChainApi::<(), Block::Hash, Block::Header, ()>::header(rpc, Some(hash)) - .await - .map_err(rpc_err_handler) - .and_then(|maybe_header| maybe_header.ok_or("header_not_found").map(|h| *h.number()))?; - - let next = number + sp_runtime::traits::One::one(); - - let next_hash = match ChainApi::<(), Block::Hash, Block::Header, ()>::block_hash( - rpc, - Some(ListOrValue::Value(NumberOrHex::Number( - next.try_into().map_err(|_| "failed to convert number to block number")?, - ))), - ) - .await - .map_err(rpc_err_handler)? - { - ListOrValue::Value(t) => t.expect("value passed in; value comes out; qed"), - _ => unreachable!(), - }; - - Ok(next_hash) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs b/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs deleted file mode 100644 index f1dee16debe7..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs +++ /dev/null @@ -1,262 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - block_building_info::BlockBuildingInfoProvider, build_executor, full_extensions, - rpc_err_handler, state_machine_call, BlockT, LiveState, SharedParams, State, -}; -use parity_scale_codec::{Decode, Encode}; -use sc_cli::Result; -use sc_executor::{sp_wasm_interface::HostFunctions, WasmExecutor}; -use serde::de::DeserializeOwned; -use sp_core::H256; -use sp_inherents::{InherentData, InherentDataProvider}; -use sp_runtime::{ - traits::{HashingFor, Header, NumberFor, One}, - Digest, -}; -use sp_state_machine::TestExternalities; -use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi}; - -/// Configurations of the [`crate::Command::FastForward`]. -#[derive(Debug, Clone, clap::Parser)] -pub struct FastForwardCmd { - /// How many blocks should be processed. If `None`, then blocks will be produced and processed - /// in a loop. - #[arg(long)] - n_blocks: Option, - - /// The state type to use. - #[command(subcommand)] - state: State, - - /// The ws uri from which to fetch the block. - /// - /// If `state` is `Live`, this is ignored. Otherwise, it must not be empty. - #[arg(long, value_parser = crate::parse::url)] - block_ws_uri: Option, - - /// Which try-state targets to execute when running this command. 
- /// - /// Expected values: - /// - `all` - /// - `none` - /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. - /// `Staking, System`). - /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a - /// round-robin fashion. - #[arg(long, default_value = "all")] - try_state: frame_try_runtime::TryStateSelect, -} - -impl FastForwardCmd { - fn block_ws_uri(&self) -> &str { - match self.state { - State::Live(LiveState { ref uri, .. }) => &uri, - _ => self - .block_ws_uri - .as_ref() - .expect("Either `--block-uri` must be provided, or state must be `live`"), - } - } -} - -/// Read the block number corresponding to `hash` with an RPC call to `ws_uri`. -async fn get_block_number( - hash: Block::Hash, - ws_uri: &str, -) -> Result> -where - Block::Header: DeserializeOwned, -{ - let rpc = ws_client(ws_uri).await?; - Ok(ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(hash)) - .await - .map_err(rpc_err_handler) - .and_then(|maybe_header| maybe_header.ok_or("header_not_found").map(|h| *h.number()))?) -} - -/// Call `method` with `data` and return the result. `externalities` will not change. -fn dry_run( - externalities: &TestExternalities>, - executor: &WasmExecutor, - method: &'static str, - data: &[u8], -) -> Result { - let (_, result) = state_machine_call::( - externalities, - executor, - method, - data, - full_extensions(executor.clone()), - )?; - - Ok(::decode(&mut &*result)?) -} - -/// Call `method` with `data` and actually save storage changes to `externalities`. -async fn run( - externalities: &mut TestExternalities>, - executor: &WasmExecutor, - method: &'static str, - data: &[u8], -) -> Result<()> { - let (mut changes, _) = state_machine_call::( - externalities, - executor, - method, - data, - full_extensions(executor.clone()), - )?; - - let storage_changes = - changes.drain_storage_changes(&externalities.backend, externalities.state_version)?; - - externalities - .backend - .apply_transaction(storage_changes.transaction_storage_root, storage_changes.transaction); - - Ok(()) -} - -/// Produce next empty block. 
-async fn next_empty_block< - Block: BlockT, - HostFns: HostFunctions, - BBIP: BlockBuildingInfoProvider>, ->( - externalities: &mut TestExternalities>, - executor: &WasmExecutor, - parent_height: NumberFor, - parent_hash: Block::Hash, - block_building_info_provider: &Option, - previous_block_building_info: Option<(InherentData, Digest)>, -) -> Result<(Block, Option<(InherentData, Digest)>)> { - let (maybe_inherent_data, pre_digest) = match &block_building_info_provider { - None => (None, Default::default()), - Some(bbip) => { - let (inherent_data_provider, pre_digest) = bbip - .get_inherent_providers_and_pre_digest(parent_hash, previous_block_building_info) - .await?; - let inherent_data = inherent_data_provider - .create_inherent_data() - .await - .map_err(|e| sc_cli::Error::Input(format!("I don't know how to convert {e:?}")))?; - - (Some(inherent_data), Digest { logs: pre_digest }) - }, - }; - - let header = Block::Header::new( - parent_height + One::one(), - Default::default(), - Default::default(), - parent_hash, - pre_digest.clone(), - ); - let mut extrinsics = >::new(); - - run::(externalities, executor, "Core_initialize_block", &header.encode()).await?; - - if let Some(ref inherent_data) = maybe_inherent_data { - extrinsics = dry_run::, Block, _>( - externalities, - executor, - "BlockBuilder_inherent_extrinsics", - &inherent_data.encode(), - )?; - } - - for xt in &extrinsics { - run::(externalities, executor, "BlockBuilder_apply_extrinsic", &xt.encode()) - .await?; - } - - let header = dry_run::( - externalities, - executor, - "BlockBuilder_finalize_block", - &[0u8; 0], - )?; - - run::(externalities, executor, "BlockBuilder_finalize_block", &[0u8; 0]).await?; - - Ok((Block::new(header, extrinsics), (maybe_inherent_data.map(|id| (id, pre_digest))))) -} - -pub(crate) async fn fast_forward( - shared: SharedParams, - command: FastForwardCmd, - block_building_info_provider: Option, -) -> Result<()> -where - Block: BlockT + DeserializeOwned, - Block::Header: DeserializeOwned, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - HostFns: HostFunctions, - BBIP: BlockBuildingInfoProvider>, -{ - let executor = build_executor::(&shared); - let ext = command.state.into_ext::(&shared, &executor, None, true).await?; - - let mut last_block_hash = ext.block_hash; - let mut last_block_number = - get_block_number::(last_block_hash, command.block_ws_uri()).await?; - let mut prev_block_building_info = None; - - let mut ext = ext.inner_ext; - - for _ in 1..=command.n_blocks.unwrap_or(u64::MAX) { - // We are saving state before we overwrite it while producing new block. - let backend = ext.as_backend(); - - log::info!("Producing new empty block at height {:?}", last_block_number + One::one()); - - let (next_block, new_block_building_info) = next_empty_block::( - &mut ext, - &executor, - last_block_number, - last_block_hash, - &block_building_info_provider, - prev_block_building_info, - ) - .await?; - - log::info!("Produced a new block: {:?}", next_block.header()); - - // And now we restore previous state. 
- ext.backend = backend; - - let state_root_check = true; - let signature_check = true; - let payload = - (next_block.clone(), state_root_check, signature_check, command.try_state.clone()) - .encode(); - run::(&mut ext, &executor, "TryRuntime_execute_block", &payload).await?; - - log::info!("Executed the new block"); - - prev_block_building_info = new_block_building_info; - last_block_hash = next_block.hash(); - last_block_number += One::one(); - } - - Ok(()) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs deleted file mode 100644 index 53db5e643463..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ /dev/null @@ -1,203 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - build_executor, full_extensions, parse, rpc_err_handler, state_machine_call_with_proof, - LiveState, SharedParams, State, LOG_TARGET, -}; -use parity_scale_codec::{Decode, Encode}; -use sc_executor::sp_wasm_interface::HostFunctions; -use serde::{de::DeserializeOwned, Serialize}; -use sp_core::H256; -use sp_runtime::{ - generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; -use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi, FinalizedHeaders, Subscription, WsClient}; - -const SUB: &str = "chain_subscribeFinalizedHeads"; -const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; - -/// Configurations of the [`crate::Command::FollowChain`]. -#[derive(Debug, Clone, clap::Parser)] -pub struct FollowChainCmd { - /// The url to connect to. - #[arg(short, long, value_parser = parse::url)] - pub uri: String, - - /// If set, then the state root check is enabled. - #[arg(long)] - pub state_root_check: bool, - - /// Which try-state targets to execute when running this command. - /// - /// Expected values: - /// - `all` - /// - `none` - /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. - /// `Staking, System`). - /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a - /// round-robin fashion. - #[arg(long, default_value = "all")] - pub try_state: frame_try_runtime::TryStateSelect, - - /// If present, a single connection to a node will be kept and reused for fetching blocks. - #[arg(long)] - pub keep_connection: bool, -} - -/// Start listening for with `SUB` at `url`. -/// -/// Returns a pair `(client, subscription)` - `subscription` alone will be useless, because it -/// relies on the related alive `client`. -async fn start_subscribing( - url: &str, -) -> sc_cli::Result<(WsClient, Subscription
)> { - let client = ws_client(url).await.map_err(|e| sc_cli::Error::Application(e.into()))?; - - log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); - - let sub = ChainApi::<(), (), Header, ()>::subscribe_finalized_heads(&client) - .await - .map_err(|e| sc_cli::Error::Application(e.into()))?; - Ok((client, sub)) -} - -pub(crate) async fn follow_chain( - shared: SharedParams, - command: FollowChainCmd, -) -> sc_cli::Result<()> -where - Block: BlockT + DeserializeOwned, - Block::Header: DeserializeOwned, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - HostFns: HostFunctions, -{ - let (rpc, subscription) = start_subscribing::(&command.uri).await?; - let mut finalized_headers: FinalizedHeaders = - FinalizedHeaders::new(&rpc, subscription); - - let mut maybe_state_ext = None; - let executor = build_executor::(&shared); - - while let Some(header) = finalized_headers.next().await { - let hash = header.hash(); - let number = header.number(); - - let block = - ChainApi::<(), Block::Hash, Block::Header, SignedBlock>::block(&rpc, Some(hash)) - .await - .or_else(|e| { - if matches!(e, substrate_rpc_client::Error::ParseError(_)) { - log::error!( - target: LOG_TARGET, - "failed to parse the block format of remote against the local \ - codebase. The block format has changed, and follow-chain cannot run in \ - this case. Try running this command in a branch of your codebase that - has the same block format as the remote chain. For now, we replace the \ - block with an empty one." - ); - } - Err(rpc_err_handler(e)) - })? - .expect("if header exists, block should also exist.") - .block; - - log::debug!( - target: LOG_TARGET, - "new block event: {:?} => {:?}, extrinsics: {}", - hash, - number, - block.extrinsics().len() - ); - - // create an ext at the state of this block, whatever is the first subscription event. - if maybe_state_ext.is_none() { - let state = State::Live(LiveState { - uri: command.uri.clone(), - // a bit dodgy, we have to un-parse the has to a string again and re-parse it - // inside. - at: Some(hex::encode(header.parent_hash().encode())), - pallet: vec![], - child_tree: true, - }); - let ext = state.into_ext::(&shared, &executor, None, true).await?; - maybe_state_ext = Some(ext); - } - - let state_ext = - maybe_state_ext.as_mut().expect("state_ext either existed or was just created"); - - let result = state_machine_call_with_proof::( - state_ext, - &executor, - "TryRuntime_execute_block", - (block, command.state_root_check, true, command.try_state.clone()) - .encode() - .as_ref(), - full_extensions(executor.clone()), - shared - .export_proof - .as_ref() - .map(|path| path.as_path().join(&format!("{}.json", number))), - ); - - if let Err(why) = result { - log::error!( - target: LOG_TARGET, - "failed to execute block {:?} due to {:?}", - number, - why - ); - continue - } - - let (mut changes, encoded_result) = result.expect("checked to be Ok; qed"); - - let consumed_weight = ::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode weight: {:?}", e))?; - - let storage_changes = changes - .drain_storage_changes( - &state_ext.backend, - // Note that in case a block contains a runtime upgrade, state version could - // potentially be incorrect here, this is very niche and would only result in - // unaligned roots, so this use case is ignored for now. 
- state_ext.state_version, - ) - .unwrap(); - - state_ext.backend.apply_transaction( - storage_changes.transaction_storage_root, - storage_changes.transaction, - ); - - log::info!( - target: LOG_TARGET, - "executed block {}, consumed weight {}, new storage root {:?}", - number, - consumed_weight, - state_ext.as_backend().root(), - ); - } - - log::error!(target: LOG_TARGET, "ws subscription must have terminated."); - Ok(()) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/substrate/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs deleted file mode 100644 index ac95384fb8aa..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ /dev/null @@ -1,102 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - build_executor, commands::execute_block::next_hash_of, full_extensions, parse, rpc_err_handler, - state_machine_call, LiveState, SharedParams, State, LOG_TARGET, -}; -use parity_scale_codec::Encode; -use sc_executor::sp_wasm_interface::HostFunctions; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr}; -use substrate_rpc_client::{ws_client, ChainApi}; - -/// Configurations of the [`crate::Command::OffchainWorker`]. -#[derive(Debug, Clone, clap::Parser)] -pub struct OffchainWorkerCmd { - /// The ws uri from which to fetch the header. - /// - /// If the `live` state type is being used, then this can be omitted, and is equal to whatever - /// the `state::uri` is. Only use this (with care) when combined with a snapshot. - #[arg( - long, - value_parser = parse::url - )] - pub header_ws_uri: Option, - - /// The state type to use. - #[command(subcommand)] - pub state: State, -} - -impl OffchainWorkerCmd { - fn header_ws_uri(&self) -> String - where - ::Err: Debug, - { - match (&self.header_ws_uri, &self.state) { - (Some(header_ws_uri), State::Snap { .. }) => header_ws_uri.to_owned(), - (Some(header_ws_uri), State::Live { .. }) => { - log::error!(target: LOG_TARGET, "--header-uri is provided while state type is live, this will most likely lead to a nonsensical result."); - header_ws_uri.to_owned() - }, - (None, State::Live(LiveState { uri, .. })) => uri.clone(), - (None, State::Snap { .. }) => { - panic!("either `--header-uri` must be provided, or state must be `live`"); - }, - } - } -} - -pub(crate) async fn offchain_worker( - shared: SharedParams, - command: OffchainWorkerCmd, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, - ::Err: Debug, - NumberFor: FromStr, - as FromStr>::Err: Debug, - HostFns: HostFunctions, -{ - let executor = build_executor(&shared); - // we first build the externalities with the remote code. 
- let ext = command.state.into_ext::(&shared, &executor, None, true).await?; - - let header_ws_uri = command.header_ws_uri::(); - - let rpc = ws_client(&header_ws_uri).await?; - let next_hash = next_hash_of::(&rpc, ext.block_hash).await?; - log::info!(target: LOG_TARGET, "fetching next header: {:?} ", next_hash); - - let header = ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(next_hash)) - .await - .map_err(rpc_err_handler) - .map(|maybe_header| maybe_header.ok_or("Header does not exist"))??; - let payload = header.encode(); - - let _ = state_machine_call::( - &ext, - &executor, - "OffchainWorkerApi_offchain_worker", - &payload, - full_extensions(executor.clone()), - )?; - - Ok(()) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/substrate/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs deleted file mode 100644 index 67988a3d1aad..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ /dev/null @@ -1,89 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{build_executor, state_machine_call_with_proof, SharedParams, State, LOG_TARGET}; -use frame_try_runtime::UpgradeCheckSelect; -use parity_scale_codec::{Decode, Encode}; -use sc_executor::sp_wasm_interface::HostFunctions; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_weights::Weight; -use std::{fmt::Debug, str::FromStr}; - -/// Configurations of the [`crate::Command::OnRuntimeUpgrade`]. -#[derive(Debug, Clone, clap::Parser)] -pub struct OnRuntimeUpgradeCmd { - /// The state type to use. - #[command(subcommand)] - pub state: State, - - /// Select which optional checks to perform. Selects all when no value is given. - /// - /// - `none`: Perform no checks. - /// - `all`: Perform all checks (default when --checks is present with no value). - /// - `pre-and-post`: Perform pre- and post-upgrade checks (default when the arg is not - /// present). - /// - `try-state`: Perform the try-state checks. - /// - /// Performing any checks will potentially invalidate the measured PoV/Weight. - // NOTE: The clap attributes make it backwards compatible with the previous `--checks` flag. 
- #[clap(long, - default_value = "pre-and-post", - default_missing_value = "all", - num_args = 0..=1, - require_equals = true, - verbatim_doc_comment)] - pub checks: UpgradeCheckSelect, -} - -pub(crate) async fn on_runtime_upgrade( - shared: SharedParams, - command: OnRuntimeUpgradeCmd, -) -> sc_cli::Result<()> -where - Block: BlockT + serde::de::DeserializeOwned, - ::Err: Debug, - Block::Header: serde::de::DeserializeOwned, - NumberFor: FromStr, - as FromStr>::Err: Debug, - HostFns: HostFunctions, -{ - let executor = build_executor(&shared); - let ext = command.state.into_ext::(&shared, &executor, None, true).await?; - - let (_, encoded_result) = state_machine_call_with_proof::( - &ext, - &executor, - "TryRuntime_on_runtime_upgrade", - command.checks.encode().as_ref(), - Default::default(), // we don't really need any extensions here. - shared.export_proof, - )?; - - let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode weight: {:?}", e))?; - - log::info!( - target: LOG_TARGET, - "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = ({} ps, {} byte), total weight = ({} ps, {} byte) ({:.2} %, {:.2} %).", - weight.ref_time(), weight.proof_size(), - total_weight.ref_time(), total_weight.proof_size(), - (weight.ref_time() as f64 / total_weight.ref_time().max(1) as f64) * 100.0, - (weight.proof_size() as f64 / total_weight.proof_size().max(1) as f64) * 100.0, - ); - - Ok(()) -} diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs deleted file mode 100644 index 606cf8a1e144..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/lib.rs +++ /dev/null @@ -1,701 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Try-runtime -//! -//! Substrate's `try-runtime` subcommand has been migrated to a [standalone -//! CLI](https://github.com/paritytech/try-runtime-cli). -//! -//! It is no longer maintained here and will be removed in the future. 
- -#![cfg(feature = "try-runtime")] - -use crate::block_building_info::BlockBuildingInfoProvider; -use parity_scale_codec::Decode; -use remote_externalities::{ - Builder, Mode, OfflineConfig, OnlineConfig, RemoteExternalities, SnapshotConfig, -}; -use sc_cli::{ - execution_method_from_cli, CliConfiguration, RuntimeVersion, WasmExecutionMethod, - WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - DEFAULT_WASM_EXECUTION_METHOD, -}; -use sc_executor::{ - sp_wasm_interface::HostFunctions, HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, -}; -use sp_core::{ - hexdisplay::HexDisplay, - offchain::{ - testing::{TestOffchainExt, TestTransactionPoolExt}, - OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, - }, - storage::well_known_keys, - traits::{CallContext, ReadRuntimeVersion, ReadRuntimeVersionExt}, - twox_128, H256, -}; -use sp_externalities::Extensions; -use sp_inherents::InherentData; -use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; -use sp_runtime::{ - traits::{BlakeTwo256, Block as BlockT, Hash as HashT, HashingFor, NumberFor}, - DeserializeOwned, Digest, -}; -use sp_state_machine::{ - CompactProof, OverlayedChanges, StateMachine, TestExternalities, TrieBackendBuilder, -}; -use sp_version::StateVersion; -use std::{fmt::Debug, path::PathBuf, str::FromStr}; - -pub mod block_building_info; -pub mod commands; -pub(crate) mod parse; -pub(crate) const LOG_TARGET: &str = "try-runtime::cli"; - -/// Possible commands of `try-runtime`. -#[derive(Debug, Clone, clap::Subcommand)] -pub enum Command { - /// Execute the migrations of the given runtime - /// - /// This uses a custom runtime api call, namely "TryRuntime_on_runtime_upgrade". The code path - /// only triggers all of the `on_runtime_upgrade` hooks in the runtime, and optionally - /// `try_state`. - /// - /// See [`frame_try_runtime::TryRuntime`] and - /// [`commands::on_runtime_upgrade::OnRuntimeUpgradeCmd`] for more information. - OnRuntimeUpgrade(commands::on_runtime_upgrade::OnRuntimeUpgradeCmd), - - /// Executes the given block against some state. - /// - /// This uses a custom runtime api call, namely "TryRuntime_execute_block". Some checks, such - /// as state-root and signature checks are always disabled, and additional checks like - /// `try-state` can be enabled. - /// - /// See [`frame_try_runtime::TryRuntime`] and [`commands::execute_block::ExecuteBlockCmd`] for - /// more information. - ExecuteBlock(commands::execute_block::ExecuteBlockCmd), - - /// Executes *the offchain worker hooks* of a given block against some state. - /// - /// This executes the same runtime api as normal block import, namely - /// `OffchainWorkerApi_offchain_worker`. - /// - /// See [`frame_try_runtime::TryRuntime`] and [`commands::offchain_worker::OffchainWorkerCmd`] - /// for more information. - OffchainWorker(commands::offchain_worker::OffchainWorkerCmd), - - /// Follow the given chain's finalized blocks and apply all of its extrinsics. - /// - /// This is essentially repeated calls to [`Command::ExecuteBlock`]. - /// - /// This allows the behavior of a new runtime to be inspected over a long period of time, with - /// realistic transactions coming as input. - /// - /// NOTE: this does NOT execute the offchain worker hooks of mirrored blocks. This might be - /// added in the future. - /// - /// This does not support snapshot states, and can only work with a remote chain. Upon first - /// connections, starts listening for finalized block events. 
Upon first block notification, it - /// initializes the state from the remote node, and starts applying that block, plus all the - /// blocks that follow, to the same growing state. - /// - /// This can only work if the block format between the remote chain and the new runtime being - /// tested has remained the same, otherwise block decoding might fail. - FollowChain(commands::follow_chain::FollowChainCmd), - - /// Produce a series of empty, consecutive blocks and execute them one-by-one. - /// - /// To compare it with [`Command::FollowChain`]: - /// - we don't have the delay of the original blocktime (for Polkadot 6s), but instead, we - /// execute every block immediately - /// - the only data that will be put into blocks are pre-runtime digest items and inherent - /// extrinsics; both things should be defined in your node CLI handling level - FastForward(commands::fast_forward::FastForwardCmd), - - /// Create a new snapshot file. - CreateSnapshot(commands::create_snapshot::CreateSnapshotCmd), -} - -#[derive(Debug, Clone)] -pub enum Runtime { - /// Use the given path to the wasm binary file. - /// - /// It must have been compiled with `try-runtime`. - Path(PathBuf), - - /// Use the code of the remote node, or the snapshot. - /// - /// In almost all cases, this is not what you want, because the code in the remote node does - /// not have any of the try-runtime custom runtime APIs. - Existing, -} - -impl FromStr for Runtime { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(match s.to_lowercase().as_ref() { - "existing" => Runtime::Existing, - x @ _ => Runtime::Path(x.into()), - }) - } -} - -/// Shared parameters of the `try-runtime` commands -#[derive(Debug, Clone, clap::Parser)] -#[group(skip)] -pub struct SharedParams { - /// Shared parameters of substrate cli. - /// - /// TODO: this is only needed because try-runtime is embedded in the substrate CLI. It should - /// go away. - #[allow(missing_docs)] - #[clap(flatten)] - pub shared_params: sc_cli::SharedParams, - - /// The runtime to use. - /// - /// Must be a path to a wasm blob, compiled with `try-runtime` feature flag. - /// - /// Or, `existing`, indicating that you don't want to overwrite the runtime. This will use - /// whatever comes from the remote node, or the snapshot file. This will most likely not work - /// against a remote node, as no (sane) blockchain should compile its onchain wasm with - /// `try-runtime` feature. - #[arg(long)] - pub runtime: Runtime, - - /// Type of wasm execution used. - #[arg( - long = "wasm-execution", - value_name = "METHOD", - value_enum, - ignore_case = true, - default_value_t = DEFAULT_WASM_EXECUTION_METHOD, - )] - pub wasm_method: WasmExecutionMethod, - - /// The WASM instantiation method to use. - /// - /// Only has an effect when `wasm-execution` is set to `compiled`. - #[arg( - long = "wasm-instantiation-strategy", - value_name = "STRATEGY", - default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - value_enum, - )] - pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, - - /// The number of 64KB pages to allocate for Wasm execution. Defaults to - /// [`sc_service::Configuration.default_heap_pages`]. - #[arg(long)] - pub heap_pages: Option, - - /// Path to a file to export the storage proof into (as a JSON). - /// If several blocks are executed, the path is interpreted as a folder - /// where one file per block will be written (named `{block_number}-{block_hash}`). - #[clap(long)] - pub export_proof: Option, - - /// Overwrite the `state_version`. 
- /// - /// Otherwise `remote-externalities` will automatically set the correct state version. - #[arg(long, value_parser = parse::state_version)] - pub overwrite_state_version: Option, -} - -/// Our `try-runtime` command. -/// -/// See [`Command`] for more info. -#[derive(Debug, Clone, clap::Parser)] -pub struct TryRuntimeCmd { - #[clap(flatten)] - pub shared: SharedParams, - - #[command(subcommand)] - pub command: Command, -} - -/// A `Live` variant [`State`] -#[derive(Debug, Clone, clap::Args)] -pub struct LiveState { - /// The url to connect to. - #[arg( - short, - long, - value_parser = parse::url, - )] - uri: String, - - /// The block hash at which to fetch the state. - /// - /// If non provided, then the latest finalized head is used. - #[arg( - short, - long, - value_parser = parse::hash, - )] - at: Option, - - /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will - /// be scraped. - #[arg(short, long, num_args = 1..)] - pallet: Vec, - - /// Fetch the child-keys as well. - /// - /// Default is `false`, if specific `--pallets` are specified, `true` otherwise. In other - /// words, if you scrape the whole state the child tree data is included out of the box. - /// Otherwise, it must be enabled explicitly using this flag. - #[arg(long)] - child_tree: bool, -} - -/// The source of runtime *state* to use. -#[derive(Debug, Clone, clap::Subcommand)] -pub enum State { - /// Use a state snapshot as the source of runtime state. - Snap { - #[arg(short, long)] - snapshot_path: PathBuf, - }, - - /// Use a live chain as the source of runtime state. - Live(LiveState), -} - -impl State { - /// Create the [`remote_externalities::RemoteExternalities`] using [`remote-externalities`] from - /// self. - /// - /// This will override the code as it sees fit based on [`SharedParams::Runtime`]. It will also - /// check the spec-version and name. - pub(crate) async fn into_ext( - &self, - shared: &SharedParams, - executor: &WasmExecutor, - state_snapshot: Option, - try_runtime_check: bool, - ) -> sc_cli::Result>> - where - Block::Header: DeserializeOwned, - ::Err: Debug, - { - let builder = match self { - State::Snap { snapshot_path } => - Builder::::new().mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new(snapshot_path), - })), - State::Live(LiveState { pallet, uri, at, child_tree }) => { - let at = match at { - Some(at_str) => Some(hash_of::(at_str)?), - None => None, - }; - Builder::::new().mode(Mode::Online(OnlineConfig { - at, - transport: uri.to_owned().into(), - state_snapshot, - pallets: pallet.clone(), - child_trie: *child_tree, - hashed_keys: vec![ - // we always download the code, but we almost always won't use it, based on - // `Runtime`. - well_known_keys::CODE.to_vec(), - // we will always download this key, since it helps detect if we should do - // runtime migration or not. - [twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat(), - [twox_128(b"System"), twox_128(b"Number")].concat(), - ], - hashed_prefixes: vec![], - })) - }, - }; - - // possibly overwrite the state version, should hardly be needed. - let builder = if let Some(state_version) = shared.overwrite_state_version { - log::warn!( - target: LOG_TARGET, - "overwriting state version to {:?}, you better know what you are doing.", - state_version - ); - builder.overwrite_state_version(state_version) - } else { - builder - }; - - // then, we prepare to replace the code based on what the CLI wishes. 
- let maybe_code_to_overwrite = match shared.runtime { - Runtime::Path(ref path) => Some(std::fs::read(path).map_err(|e| { - format!("error while reading runtime file from {:?}: {:?}", path, e) - })?), - Runtime::Existing => None, - }; - - // build the main ext. - let mut ext = builder.build().await?; - - // actually replace the code if needed. - if let Some(new_code) = maybe_code_to_overwrite { - let original_code = ext - .execute_with(|| sp_io::storage::get(well_known_keys::CODE)) - .expect("':CODE:' is always downloaded in try-runtime-cli; qed"); - - // NOTE: see the impl notes of `read_runtime_version`, the ext is almost not used here, - // only as a backup. - ext.insert(well_known_keys::CODE.to_vec(), new_code.clone()); - let old_version = ::decode( - &mut &*executor.read_runtime_version(&original_code, &mut ext.ext()).unwrap(), - ) - .unwrap(); - log::info!( - target: LOG_TARGET, - "original spec: {:?}-{:?}, code hash: {:?}", - old_version.spec_name, - old_version.spec_version, - HexDisplay::from(BlakeTwo256::hash(&original_code).as_fixed_bytes()), - ); - let new_version = ::decode( - &mut &*executor.read_runtime_version(&new_code, &mut ext.ext()).unwrap(), - ) - .unwrap(); - log::info!( - target: LOG_TARGET, - "new spec: {:?}-{:?}, code hash: {:?}", - new_version.spec_name, - new_version.spec_version, - HexDisplay::from(BlakeTwo256::hash(&new_code).as_fixed_bytes()) - ); - - if new_version.spec_name != old_version.spec_name { - return Err("Spec names must match.".into()) - } - } - - // whatever runtime we have in store now must have been compiled with try-runtime feature. - if try_runtime_check { - if !ensure_try_runtime::(&executor, &mut ext) { - return Err("given runtime is NOT compiled with try-runtime feature!".into()) - } - } - - Ok(ext) - } -} - -pub const DEPRECATION_NOTICE: &str = "Substrate's `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. Please remove this subcommand from your runtime and use the standalone CLI."; - -impl TryRuntimeCmd { - // Can't reuse DEPRECATION_NOTICE in the deprecated macro - #[deprecated( - note = "Substrate's `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. Please remove this subcommand from your runtime and use the standalone CLI." 
- )] - pub async fn run( - &self, - block_building_info_provider: Option, - ) -> sc_cli::Result<()> - where - Block: BlockT + DeserializeOwned, - Block::Header: DeserializeOwned, - ::Err: Debug, - as FromStr>::Err: Debug, - as TryInto>::Error: Debug, - NumberFor: FromStr, - HostFns: HostFunctions, - BBIP: BlockBuildingInfoProvider>, - { - match &self.command { - Command::OnRuntimeUpgrade(ref cmd) => - commands::on_runtime_upgrade::on_runtime_upgrade::( - self.shared.clone(), - cmd.clone(), - ) - .await, - Command::OffchainWorker(cmd) => - commands::offchain_worker::offchain_worker::( - self.shared.clone(), - cmd.clone(), - ) - .await, - Command::ExecuteBlock(cmd) => - commands::execute_block::execute_block::( - self.shared.clone(), - cmd.clone(), - ) - .await, - Command::FollowChain(cmd) => - commands::follow_chain::follow_chain::( - self.shared.clone(), - cmd.clone(), - ) - .await, - Command::FastForward(cmd) => - commands::fast_forward::fast_forward::( - self.shared.clone(), - cmd.clone(), - block_building_info_provider, - ) - .await, - Command::CreateSnapshot(cmd) => - commands::create_snapshot::create_snapshot::( - self.shared.clone(), - cmd.clone(), - ) - .await, - } - } -} - -impl CliConfiguration for TryRuntimeCmd { - fn shared_params(&self) -> &sc_cli::SharedParams { - &self.shared.shared_params - } - - fn chain_id(&self, _is_dev: bool) -> sc_cli::Result { - Ok(match self.shared.shared_params.chain { - Some(ref chain) => chain.clone(), - None => "dev".into(), - }) - } -} - -/// Get the hash type of the generic `Block` from a `hash_str`. -pub(crate) fn hash_of(hash_str: &str) -> sc_cli::Result -where - ::Err: Debug, -{ - hash_str - .parse::<::Hash>() - .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) -} - -/// Build all extensions that we typically use. -pub(crate) fn full_extensions(wasm_executor: WasmExecutor) -> Extensions { - let mut extensions = Extensions::default(); - let (offchain, _offchain_state) = TestOffchainExt::new(); - let (pool, _pool_state) = TestTransactionPoolExt::new(); - let keystore = MemoryKeystore::new(); - extensions.register(OffchainDbExt::new(offchain.clone())); - extensions.register(OffchainWorkerExt::new(offchain)); - extensions.register(KeystoreExt::new(keystore)); - extensions.register(TransactionPoolExt::new(pool)); - extensions.register(ReadRuntimeVersionExt::new(wasm_executor)); - - extensions -} - -/// Build wasm executor by default config. 
-pub(crate) fn build_executor(shared: &SharedParams) -> WasmExecutor { - let heap_pages = shared - .heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |p| HeapAllocStrategy::Static { extra_pages: p as _ }); - - WasmExecutor::builder() - .with_execution_method(execution_method_from_cli( - shared.wasm_method, - shared.wasmtime_instantiation_strategy, - )) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build() -} - -/// Ensure that the given `ext` is compiled with `try-runtime` -fn ensure_try_runtime( - executor: &WasmExecutor, - ext: &mut TestExternalities>, -) -> bool { - use sp_api::RuntimeApiInfo; - let final_code = ext - .execute_with(|| sp_io::storage::get(well_known_keys::CODE)) - .expect("':CODE:' is always downloaded in try-runtime-cli; qed"); - let final_version = ::decode( - &mut &*executor.read_runtime_version(&final_code, &mut ext.ext()).unwrap(), - ) - .unwrap(); - final_version - .api_version(&>::ID) - .is_some() -} - -/// Execute the given `method` and `data` on top of `ext`, returning the results (encoded) and the -/// state `changes`. -pub(crate) fn state_machine_call( - ext: &TestExternalities>, - executor: &WasmExecutor, - method: &'static str, - data: &[u8], - mut extensions: Extensions, -) -> sc_cli::Result<(OverlayedChanges>, Vec)> { - let mut changes = Default::default(); - let encoded_results = StateMachine::new( - &ext.backend, - &mut changes, - executor, - method, - data, - &mut extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, - CallContext::Offchain, - ) - .execute() - .map_err(|e| format!("failed to execute '{}': {}", method, e)) - .map_err::(Into::into)?; - - Ok((changes, encoded_results)) -} - -/// Same as [`state_machine_call`], but it also computes and prints the storage proof in different -/// size and formats. -/// -/// Make sure [`LOG_TARGET`] is enabled in logging. 
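// For reference, a minimal sketch of the three proof measurements the function below performs:
// the raw `StorageProof` encoding, the `CompactProof` built against the pre-state root, and a
// zstd-compressed copy of the compact proof. Assumes `sp-state-machine`, `sp-runtime`,
// `parity-scale-codec` and `zstd` as dependencies; `proof` and `pre_root` are stand-in inputs.
use parity_scale_codec::Encode;
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::StorageProof;

fn proof_sizes(proof: StorageProof, pre_root: H256) -> (usize, usize, usize) {
	let raw_size = proof.encoded_size();
	let compact = proof
		.clone()
		.into_compact_proof::<BlakeTwo256>(pre_root)
		.expect("the proof was recorded against `pre_root`; qed");
	let compact_size = compact.encoded_size();
	let compressed = zstd::stream::encode_all(&compact.encode()[..], 0)
		.expect("compressing an in-memory buffer does not fail; qed");
	(raw_size, compact_size, compressed.len())
}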
-pub(crate) fn state_machine_call_with_proof( - ext: &TestExternalities>, - executor: &WasmExecutor, - method: &'static str, - data: &[u8], - mut extensions: Extensions, - maybe_export_proof: Option, -) -> sc_cli::Result<(OverlayedChanges>, Vec)> { - use parity_scale_codec::Encode; - - let mut changes = Default::default(); - let backend = ext.backend.clone(); - let runtime_code_backend = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let proving_backend = - TrieBackendBuilder::wrap(&backend).with_recorder(Default::default()).build(); - let runtime_code = runtime_code_backend.runtime_code()?; - - let pre_root = *backend.root(); - let encoded_results = StateMachine::new( - &proving_backend, - &mut changes, - executor, - method, - data, - &mut extensions, - &runtime_code, - CallContext::Offchain, - ) - .execute() - .map_err(|e| format!("failed to execute {}: {}", method, e)) - .map_err::(Into::into)?; - - let proof = proving_backend - .extract_proof() - .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - - if let Some(path) = maybe_export_proof { - let mut file = std::fs::File::create(&path).map_err(|e| { - log::error!( - target: LOG_TARGET, - "Failed to create file {}: {:?}", - path.to_string_lossy(), - e - ); - e - })?; - - log::info!(target: LOG_TARGET, "Writing storage proof to {}", path.to_string_lossy()); - - use std::io::Write as _; - file.write_all(storage_proof_to_raw_json(&proof).as_bytes()).map_err(|e| { - log::error!( - target: LOG_TARGET, - "Failed to write storage proof to {}: {:?}", - path.to_string_lossy(), - e - ); - e - })?; - } - - let proof_size = proof.encoded_size(); - let compact_proof = proof - .clone() - .into_compact_proof::>(pre_root) - .map_err(|e| { - log::error!(target: LOG_TARGET, "failed to generate compact proof {}: {:?}", method, e); - e - }) - .unwrap_or(CompactProof { encoded_nodes: Default::default() }); - - let compact_proof_size = compact_proof.encoded_size(); - let compressed_proof = zstd::stream::encode_all(&compact_proof.encode()[..], 0) - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to generate compressed proof {}: {:?}", - method, - e - ); - e - }) - .unwrap_or_default(); - - let proof_nodes = proof.into_nodes(); - - let humanize = |s| { - if s < 1024 * 1024 { - format!("{:.2} KB ({} bytes)", s as f64 / 1024f64, s) - } else { - format!( - "{:.2} MB ({} KB) ({} bytes)", - s as f64 / (1024f64 * 1024f64), - s as f64 / 1024f64, - s - ) - } - }; - log::debug!( - target: LOG_TARGET, - "proof: 0x{}... / {} nodes", - HexDisplay::from(&proof_nodes.iter().flatten().cloned().take(10).collect::>()), - proof_nodes.len() - ); - log::debug!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); - log::debug!(target: LOG_TARGET, "compact proof size: {}", humanize(compact_proof_size),); - log::debug!( - target: LOG_TARGET, - "zstd-compressed compact proof {}", - humanize(compressed_proof.len()), - ); - - log::debug!(target: LOG_TARGET, "{} executed without errors.", method); - - Ok((changes, encoded_results)) -} - -pub(crate) fn rpc_err_handler(error: impl Debug) -> &'static str { - log::error!(target: LOG_TARGET, "rpc error: {:?}", error); - "rpc error." -} - -/// Converts a [`sp_state_machine::StorageProof`] into a JSON string. 
-fn storage_proof_to_raw_json(storage_proof: &sp_state_machine::StorageProof) -> String { - serde_json::Value::Object( - storage_proof - .to_memory_db::() - .drain() - .iter() - .map(|(key, (value, _n))| { - ( - format!("0x{}", hex::encode(key.as_bytes())), - serde_json::Value::String(format!("0x{}", hex::encode(value))), - ) - }) - .collect(), - ) - .to_string() -} diff --git a/substrate/utils/frame/try-runtime/cli/src/parse.rs b/substrate/utils/frame/try-runtime/cli/src/parse.rs deleted file mode 100644 index 336a36baf241..000000000000 --- a/substrate/utils/frame/try-runtime/cli/src/parse.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Utils for parsing user input - -use sp_version::StateVersion; - -pub(crate) fn hash(block_hash: &str) -> Result { - let (block_hash, offset) = if let Some(block_hash) = block_hash.strip_prefix("0x") { - (block_hash, 2) - } else { - (block_hash, 0) - }; - - if let Some(pos) = block_hash.chars().position(|c| !c.is_ascii_hexdigit()) { - Err(format!( - "Expected block hash, found illegal hex character at position: {}", - offset + pos, - )) - } else { - Ok(block_hash.into()) - } -} - -pub(crate) fn url(s: &str) -> Result { - if s.starts_with("ws://") || s.starts_with("wss://") { - // could use Url crate as well, but lets keep it simple for now. - Ok(s.to_string()) - } else { - Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") - } -} - -pub(crate) fn state_version(s: &str) -> Result { - s.parse::() - .map_err(|_| ()) - .and_then(StateVersion::try_from) - .map_err(|_| "Invalid state version.") -} diff --git a/templates/parachain/node/src/cli.rs b/templates/parachain/node/src/cli.rs index a5c55b8118a8..cffbfbc1db23 100644 --- a/templates/parachain/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -37,11 +37,6 @@ pub enum Subcommand { /// The pallet benchmarking moved to the `pallet` sub-command. #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - - /// Try-runtime has migrated to a standalone - /// [CLI](). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after January 2024. - TryRuntime, } const AFTER_HELP_EXAMPLE: &str = color_print::cstr!( diff --git a/templates/parachain/node/src/command.rs b/templates/parachain/node/src/command.rs index 3907b1e247ab..56ae022cad2b 100644 --- a/templates/parachain/node/src/command.rs +++ b/templates/parachain/node/src/command.rs @@ -216,7 +216,6 @@ pub fn run() -> Result<()> { _ => Err("Benchmarking sub-command unsupported".into()), } }, - Some(Subcommand::TryRuntime) => Err("The `try-runtime` subcommand has been migrated to a standalone CLI (https://github.com/paritytech/try-runtime-cli). It is no longer being maintained here and will be removed entirely some time after January 2024. 
Please remove this subcommand from your runtime and use the standalone CLI.".into()), None => { let runner = cli.create_runner(&cli.run.normalize())?; let collator_options = cli.run.collator_options(); diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 1ddd1cb3a7b4..9332da3a6549 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -64,9 +64,6 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c # Local Dependencies solochain-template-runtime = { path = "../runtime" } -# CLI-specific dependencies -try-runtime-cli = { path = "../../../substrate/utils/frame/try-runtime/cli", optional = true } - [build-dependencies] substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } @@ -87,5 +84,4 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "solochain-template-runtime/try-runtime", "sp-runtime/try-runtime", - "try-runtime-cli/try-runtime", ] diff --git a/templates/solochain/node/src/cli.rs b/templates/solochain/node/src/cli.rs index 7f1b67b3696e..b2c53aa70949 100644 --- a/templates/solochain/node/src/cli.rs +++ b/templates/solochain/node/src/cli.rs @@ -41,11 +41,6 @@ pub enum Subcommand { #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try-runtime has migrated to a standalone CLI - /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after January 2024. - TryRuntime, - /// Db meta columns information. ChainInfo(sc_cli::ChainInfoCmd), } diff --git a/templates/solochain/node/src/command.rs b/templates/solochain/node/src/command.rs index 7f6df42fb0f9..e46fedc91f0e 100644 --- a/templates/solochain/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -170,12 +170,6 @@ pub fn run() -> sc_cli::Result<()> { } }) }, - #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.into()), - #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ - You can enable it with `--features try-runtime`." - .into()), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(&config))
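// For reference, the templates keep their `try-runtime` cargo feature even though the CLI stub
// is removed: the feature still gates runtime-side code, namely the `TryRuntime` runtime API and
// hooks such as `try_state`, which the standalone try-runtime-cli exercises. A minimal sketch of
// such a hook, assumed to live inside a `#[frame_support::pallet]` module with the usual
// `pallet_prelude` imports; `Something` is a hypothetical storage item used only for
// illustration.
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
	#[cfg(feature = "try-runtime")]
	fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
		// Check a pallet invariant; a failure here surfaces when try-state checks are enabled.
		frame_support::ensure!(Something::<T>::get().is_some(), "`Something` must be set");
		Ok(())
	}
}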