From b9b34fb983dac58ae05b0e1379e20363f6f7c88e Mon Sep 17 00:00:00 2001
From: Evgeny Snitko
Date: Thu, 5 Sep 2024 19:27:18 +0400
Subject: [PATCH 1/7] GitHub Actions coverage (#5148)

Jobs for coverage collection and upload to Codecov for GitHub PRs.

Uses the same test suite as test-linux-stable, splits the tests into 5
parallel jobs, uploads the results to Codecov, and generates a report
comment and status checks (which can be made required).

[screenshots: Codecov PR comment and status checks]

Codecov behavior (required coverage, thresholds, comment info, etc.) is
highly customizable via `.github/codecov.yml`
([reference](https://docs.codecov.com/docs/codecovyml-reference)).

Unfortunately, some tests are excluded because with `-C instrument-coverage`
they run very slowly, are flaky, or fail (see the [nextest filter expression](https://github.com/paritytech/polkadot-sdk/pull/5148/files#diff-b19504a9520a2498d03020108344d8e6d93d254d812bfa26247faaa7f55263d6R80)
of test-linux-stable-coverage). So for now this workflow is optional and will
only run for PRs with the `GHA-coverage` label.
---
 .github/codecov.yml                           |   8 +-
 .../workflows/tests-linux-stable-coverage.yml | 143 ++++++++++++++++++
 substrate/bin/node/runtime/src/lib.rs         |   7 +-
 substrate/frame/babe/src/mock.rs              |   6 +-
 substrate/frame/grandpa/src/mock.rs           |   6 +-
 5 files changed, 159 insertions(+), 11 deletions(-)
 create mode 100644 .github/workflows/tests-linux-stable-coverage.yml

diff --git a/.github/codecov.yml b/.github/codecov.yml
index ceceb9e63654..b237c9fe6b04 100644
--- a/.github/codecov.yml
+++ b/.github/codecov.yml
@@ -6,4 +6,10 @@ coverage:
     project:
       default:
         target: 1.0
-        threshold: 2.0
\ No newline at end of file
+        threshold: 2.0
+
+comment:
+  behavior: new
+
+fixes:
+  - "/__w/polkadot-sdk/polkadot-sdk/::"
\ No newline at end of file
diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml
new file mode 100644
index 000000000000..ddf0642a4043
--- /dev/null
+++ b/.github/workflows/tests-linux-stable-coverage.yml
@@ -0,0 +1,143 @@
+# GHA for test-linux-stable-int, test-linux-stable, test-linux-stable-oldkernel
+name: tests linux stable coverage
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review, labeled]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+
+  set-image:
+    # GitHub Actions allows using 'env' in a container context.
+    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
+    # This workaround sets the container image for each job using 'set-image' job output.
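+    # The whole workflow is gated on the `GHA-coverage` label (see the `if`
+    # condition below), so coverage runs stay opt-in for now.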
+ if: contains(github.event.label.name, 'GHA-coverage') || contains(github.event.pull_request.labels.*.name, 'GHA-coverage') + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_runner.outputs.RUNNER }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + - id: set_runner + run: | + # Run merge queues on persistent runners + if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then + echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT + else + echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + fi + + # + # + # + test-linux-stable-coverage: + needs: [set-image] + runs-on: ${{ needs.set-image.outputs.RUNNER }} + timeout-minutes: 120 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + # + # -Cinstrument-coverage slows everything down but it is necessary for code coverage + # https://doc.rust-lang.org/rustc/instrument-coverage.html + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings -Cinstrument-coverage" + LLVM_PROFILE_FILE: "/__w/polkadot-sdk/polkadot-sdk/target/coverage/cargo-test-${{ matrix.ci_node_index }}-%p-%m.profraw" + strategy: + fail-fast: false + matrix: + ci_node_index: [1, 2, 3, 4, 5] + ci_node_total: [5] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - run: rustup component add llvm-tools-preview + - run: cargo install cargo-llvm-cov + + - run: mkdir -p target/coverage + + # Some tests are excluded because they run very slowly or fail with -Cinstrument-coverage + - name: run tests + run: > + time cargo llvm-cov nextest + --no-report --release + --workspace + --locked --no-fail-fast + --features try-runtime,ci-only-tests,experimental,riscv + --filter-expr " + !test(/.*benchmark.*/) + - test(/recovers_from_only_chunks_if_pov_large::case_1/) + - test(/participation_requests_reprioritized_for_newly_included/) + - test(/availability_is_recovered_from_chunks_if_no_group_provided::case_1/) + - test(/rejects_missing_inherent_digest/) + - test(/availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only::case_1/) + - test(/availability_is_recovered_from_chunks_if_no_group_provided::case_2/) + - test(/all_security_features_work/) + - test(/nonexistent_cache_dir/) + - test(/recovers_from_only_chunks_if_pov_large::case_3/) + - test(/recovers_from_only_chunks_if_pov_large::case_2/) + - test(/authoring_blocks/) + - test(/rejects_missing_seals/) + - test(/generate_chain_spec/) + - test(/get_preset/) + - test(/list_presets/) + - test(/tests::receive_rate_limit_is_enforced/) + - test(/polkadot-availability-recovery/) + " + --partition count:${{ matrix.ci_node_index }}/${{ matrix.ci_node_total }} + + - name: generate report + run: cargo llvm-cov report --release --codecov --output-path coverage-${{ matrix.ci_node_index }}.lcov + - name: upload report + uses: actions/upload-artifact@v4 + with: + name: coverage-report-${{ matrix.ci_node_index }}.lcov + path: coverage-${{ matrix.ci_node_index }}.lcov + + # + # + # Upload to codecov + upload-reports: + needs: [test-linux-stable-coverage] + runs-on: ubuntu-latest + steps: + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: reports + pattern: coverage-report-* + merge-multiple: true + - run: ls -al reports/ + - name: Upload to Codecov + uses: codecov/codecov-action@v4 + with: + 
token: ${{ secrets.CODECOV_TOKEN }} + verbose: true + directory: reports + root_dir: /__w/polkadot-sdk/polkadot-sdk/ + + # + # + # + remove-label: + runs-on: ubuntu-latest + needs: [upload-reports] + if: github.event_name == 'pull_request' + steps: + - uses: actions/checkout@v2 + - uses: actions-ecosystem/action-remove-labels@v1 + with: + labels: GHA-coverage \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 001b2273c9b2..6ae04902aa82 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -507,8 +507,7 @@ impl pallet_babe::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_babe::EquivocationReportSystem; } @@ -1534,7 +1533,7 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = MaxAuthorities; type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_grandpa::EquivocationReportSystem; } @@ -2614,7 +2613,7 @@ impl pallet_beefy::Config for Runtime { type OnNewValidatorSet = MmrLeaf; type AncestryHelper = MmrLeaf; type WeightInfo = (); - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = pallet_beefy::EquivocationReportSystem; } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 912cb3e27cd5..4e4052b2b566 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -25,12 +25,12 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ - crypto::{KeyTypeId, Pair, VrfSecret}, + crypto::{Pair, VrfSecret}, U256, }; use sp_io; @@ -182,7 +182,7 @@ impl Config for Test { type WeightInfo = (); type MaxAuthorities = ConstU32<10>; type MaxNominators = ConstU32<100>; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index ae230a0209a7..caac4107cfb7 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -28,11 +28,11 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, + traits::{ConstU128, ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_core::{crypto::KeyTypeId, H256}; +use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -186,7 +186,7 @@ impl Config for Test { type MaxAuthorities = ConstU32<100>; type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = 
sp_session::MembershipProof; type EquivocationReportSystem = super::EquivocationReportSystem; } From 702a15cbaa032899f2321fda892faf723d32efca Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 5 Sep 2024 17:02:24 +0100 Subject: [PATCH 2/7] minor fixes pipeline (#5607) - [return macos jobs to gitlab](https://github.com/paritytech/polkadot-sdk/commit/dcd44b1d8bb681b66cbc0a063a6a999bd8253cdc) - [add benches to merge queue](https://github.com/paritytech/polkadot-sdk/commit/494eb21bb9ac4633f3217e6b58ba7256aea6e38a) - [require test-deterministic-wasm and run it earlier](https://github.com/paritytech/polkadot-sdk/commit/ab9ae5ca6c5128e002cc745d608e542138633250) --- .github/workflows/tests-misc.yml | 69 +++++++++++++------------------- .gitlab-ci.yml | 5 +++ .gitlab/pipeline/test.yml | 22 ++++++++++ 3 files changed, 54 insertions(+), 42 deletions(-) diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 824e8c11c2a4..2e78f4a34ede 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -121,7 +121,7 @@ jobs: test-deterministic-wasm: timeout-minutes: 20 - needs: [ set-image, test-frame-ui ] + needs: [ set-image ] runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -144,7 +144,7 @@ jobs: cargo-check-benches-branches: needs: [ set-image ] - if: ${{ github.event_name == 'pull_request' }} + if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} timeout-minutes: 60 outputs: branch: ${{ steps.branch.outputs.branch }} @@ -354,46 +354,30 @@ jobs: cp .forklift/config.toml /github/home/.forklift/config.toml PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} - # TODO: enable when we have a macos Self-Hosted runners - # cargo-check-each-crate-macos: - # timeout-minutes: 120 - # needs: [ set-image ] - # runs-on: macos-latest - # env: - # RUSTFLAGS: "-D warnings" - # CI_JOB_NAME: cargo-check-each-crate - # IMAGE: ${{ needs.set-image.outputs.IMAGE }} - # strategy: - # fail-fast: false - # matrix: - # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs - # steps: - # - name: Checkout - # uses: actions/checkout@v4.1.7 - # - # - run: | - # VERSION=$(echo $IMAGE | sed -E 's/.*:bullseye-([^-]+)-.*/\1/') - # echo $VERSION - # echo "VERSION=$VERSION" >> $GITHUB_ENV - # - # - run: | - # rustup install $VERSION - # rustup default $VERSION - # - # - name: Check Rust - # run: | - # rustup show - # rustup +nightly show - # - # - name: MacOS Deps - # run: | - # brew install protobuf openssl pkg-config zlib xz zstd llvm jq curl gcc make cmake - # rustup target add wasm32-unknown-unknown --toolchain $VERSION - # rustup component add rust-src rustfmt clippy --toolchain $VERSION - # - # - name: script - # run: | - # PYTHONUNBUFFERED=x .github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} True + # cargo-check-each-crate-macos: + # timeout-minutes: 120 + # needs: [ set-image ] + # runs-on: macOS + # env: + # RUSTFLAGS: "-D warnings" + # CI_JOB_NAME: cargo-check-each-crate + # IMAGE: ${{ needs.set-image.outputs.IMAGE }} + # strategy: + # fail-fast: false + # matrix: + # index: [ 1,2,3,4,5,6,7,8,9,10 ] # 10 parallel jobs + # steps: + # - name: Checkout + # uses: actions/checkout@v4.1.7 + + # - name: Install dependencies + # uses: ./.github/actions/set-up-mac + # with: + # IMAGE: ${{ needs.set-image.outputs.IMAGE }} + + # - name: script + # run: | + # PYTHONUNBUFFERED=x 
.github/scripts/check-each-crate.py ${{ matrix.index }} ${{ strategy.job-total }} True confirm-required-test-misc-jobs-passed: runs-on: ubuntu-latest @@ -408,6 +392,7 @@ jobs: - test-node-metrics - check-tracing - cargo-check-each-crate + - test-deterministic-wasm # - cargo-hfuzz remove from required for now, as it's flaky steps: - run: echo '### Good job! All the required tests passed 🚀' >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8b4ca48150b1..43123cdbfc41 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -288,3 +288,8 @@ cancel-pipeline-build-short-benchmark: extends: .cancel-pipeline-template needs: - job: build-short-benchmark + +cancel-pipeline-cargo-check-each-crate-macos: + extends: .cancel-pipeline-template + needs: + - job: cargo-check-each-crate-macos \ No newline at end of file diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 00a0aa2c9771..0879870ae13c 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -153,3 +153,25 @@ quick-benchmarks-omni: script: - time cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks - time cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet + +cargo-check-each-crate-macos: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + # - .collect-artifacts + before_script: + # skip timestamp script, the osx bash doesn't support printf %()T + - !reference [.job-switcher, before_script] + - !reference [.rust-info-script, script] + - !reference [.pipeline-stopper-vars, script] + variables: + SKIP_WASM_BUILD: 1 + script: + # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available + # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 + - time cargo check --workspace --locked + timeout: 2h + tags: + - osx From 8d81f1e648a21d7d14f94bc86503d3c77ead5807 Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Thu, 5 Sep 2024 17:07:14 +0100 Subject: [PATCH 3/7] /cmd followups (#5533) Closes: https://github.com/paritytech/polkadot-sdk/issues/5545 - add missing template for frame & xcm benchmarks - fix `git pull` -> https://github.com/paritytech/polkadot-sdk/actions/runs/10644887539/job/29510118915 - respect runtimes headers - use GNU instead of apache for runtimes - adds tests for cmd.py Tip: review this one with Whitespace hidden ![image](https://github.com/user-attachments/assets/3bcdc6c2-7371-428f-9962-556ca81c1467) --------- Co-authored-by: GitHub Action --- .github/scripts/cmd/cmd.py | 275 +++++++++++--------- .github/scripts/cmd/test_cmd.py | 321 ++++++++++++++++++++++++ .github/workflows/cmd-tests.yml | 14 ++ .github/workflows/cmd.yml | 2 +- .github/workflows/runtimes-matrix.json | 29 +++ substrate/frame/balances/src/weights.rs | 138 +++++----- 6 files changed, 584 insertions(+), 195 deletions(-) create mode 100644 .github/scripts/cmd/test_cmd.py create mode 100644 .github/workflows/cmd-tests.yml diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 63bd6a2795aa..1c08b621467d 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -11,6 +11,8 @@ f = open('.github/workflows/runtimes-matrix.json', 'r') runtimesMatrix = json.load(f) +print(f'runtimesMatrix: {runtimesMatrix}\n') + runtimeNames = list(map(lambda 
x: x['name'], runtimesMatrix)) common_args = { @@ -67,130 +69,153 @@ for arg, config in common_args.items(): parser_ui.add_argument(arg, **config) +def main(): + global args, unknown, runtimesMatrix + args, unknown = parser.parse_known_args() + + print(f'args: {args}') + + if args.command == 'bench': + runtime_pallets_map = {} + failed_benchmarks = {} + successful_benchmarks = {} + + profile = "release" + + print(f'Provided runtimes: {args.runtime}') + # convert to mapped dict + runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) + runtimesMatrix = {x['name']: x for x in runtimesMatrix} + print(f'Filtered out runtimes: {runtimesMatrix}') + + # loop over remaining runtimes to collect available pallets + for runtime in runtimesMatrix.values(): + os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks") + print(f'-- listing pallets for benchmark for {runtime["name"]}') + wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" + output = os.popen( + f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read() + raw_pallets = output.strip().split('\n') + + all_pallets = set() + for pallet in raw_pallets: + if pallet: + all_pallets.add(pallet.split(',')[0].strip()) + + pallets = list(all_pallets) + print(f'Pallets in {runtime["name"]}: {pallets}') + runtime_pallets_map[runtime['name']] = pallets + + print(f'\n') + + # filter out only the specified pallets from collected runtimes/pallets + if args.pallet: + print(f'Pallets: {args.pallet}') + new_pallets_map = {} + # keep only specified pallets if they exist in the runtime + for runtime in runtime_pallets_map: + if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): + new_pallets_map[runtime] = args.pallet + + runtime_pallets_map = new_pallets_map + + print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') + + if not runtime_pallets_map: + if args.pallet and not args.runtime: + print(f"No pallets {args.pallet} found in any runtime") + elif args.runtime and not args.pallet: + print(f"{args.runtime} runtime does not have any pallets") + elif args.runtime and args.pallet: + print(f"No pallets {args.pallet} found in {args.runtime}") + else: + print('No runtimes found') + sys.exit(1) -args, unknown = parser.parse_known_args() - -print(f'args: {args}') - -if args.command == 'bench': - runtime_pallets_map = {} - failed_benchmarks = {} - successful_benchmarks = {} - - profile = "release" - - print(f'Provided runtimes: {args.runtime}') - # convert to mapped dict - runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) - runtimesMatrix = {x['name']: x for x in runtimesMatrix} - print(f'Filtered out runtimes: {runtimesMatrix}') - - # loop over remaining runtimes to collect available pallets - for runtime in runtimesMatrix.values(): - os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks") - print(f'-- listing pallets for benchmark for {runtime["name"]}') - wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" - output = os.popen( - f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read() - raw_pallets = output.strip().split('\n') - - all_pallets = set() - for pallet in raw_pallets: - if 
pallet: - all_pallets.add(pallet.split(',')[0].strip()) - - pallets = list(all_pallets) - print(f'Pallets in {runtime}: {pallets}') - runtime_pallets_map[runtime['name']] = pallets - - # filter out only the specified pallets from collected runtimes/pallets - if args.pallet: - print(f'Pallet: {args.pallet}') - new_pallets_map = {} - # keep only specified pallets if they exist in the runtime for runtime in runtime_pallets_map: - if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): - new_pallets_map[runtime] = args.pallet - - runtime_pallets_map = new_pallets_map - - print(f'Filtered out runtimes & pallets: {runtime_pallets_map}') - - if not runtime_pallets_map: - if args.pallet and not args.runtime: - print(f"No pallets {args.pallet} found in any runtime") - elif args.runtime and not args.pallet: - print(f"{args.runtime} runtime does not have any pallets") - elif args.runtime and args.pallet: - print(f"No pallets {args.pallet} found in {args.runtime}") - else: - print('No runtimes found') - sys.exit(1) - - header_path = os.path.abspath('./substrate/HEADER-APACHE2') - - for runtime in runtime_pallets_map: - for pallet in runtime_pallets_map[runtime]: - config = runtimesMatrix[runtime] - print(f'-- config: {config}') - if runtime == 'dev': - # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) - search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" - print(f'-- running: {search_manifest_path}') - manifest_path = os.popen(search_manifest_path).read() - if not manifest_path: - print(f'-- pallet {pallet} not found in dev runtime') - exit(1) - package_dir = os.path.dirname(manifest_path) - print(f'-- package_dir: {package_dir}') - print(f'-- manifest_path: {manifest_path}') - output_path = os.path.join(package_dir, "src", "weights.rs") - else: - default_path = f"./{config['path']}/src/weights" - xcm_path = f"./{config['path']}/src/weights/xcm" - output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path - print(f'-- benchmarking {pallet} in {runtime} into {output_path}') - cmd = f"frame-omni-bencher v1 benchmark pallet --extrinsic=* --runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm --pallet={pallet} --header={header_path} --output={output_path} --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 --no-storage-info --no-min-squares --no-median-slopes" - print(f'-- Running: {cmd}') - status = os.system(cmd) - if status != 0 and not args.continue_on_fail: - print(f'Failed to benchmark {pallet} in {runtime}') - sys.exit(1) - - # Otherwise collect failed benchmarks and print them at the end - # push failed pallets to failed_benchmarks - if status != 0: - failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] - else: - successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] - - if failed_benchmarks: - print('❌ Failed benchmarks of runtimes/pallets:') - for runtime, pallets in failed_benchmarks.items(): - print(f'-- {runtime}: {pallets}') - - if successful_benchmarks: - print('✅ Successful benchmarks of runtimes/pallets:') - for runtime, pallets in successful_benchmarks.items(): - print(f'-- {runtime}: {pallets}') - -elif args.command == 'fmt': - command = f"cargo +nightly fmt" - print(f'Formatting with `{command}`') - nightly_status = os.system(f'{command}') - taplo_status = os.system('taplo 
format --config .config/taplo.toml') - - if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: - print('❌ Failed to format code') - sys.exit(1) - -elif args.command == 'update-ui': - command = 'sh ./scripts/update-ui-tests.sh' - print(f'Updating ui with `{command}`') - status = os.system(f'{command}') - - if status != 0 and not args.continue_on_fail: - print('❌ Failed to format code') - sys.exit(1) - -print('🚀 Done') + for pallet in runtime_pallets_map[runtime]: + config = runtimesMatrix[runtime] + header_path = os.path.abspath(config['header']) + template = None + + print(f'-- config: {config}') + if runtime == 'dev': + # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) + search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" + print(f'-- running: {search_manifest_path}') + manifest_path = os.popen(search_manifest_path).read() + if not manifest_path: + print(f'-- pallet {pallet} not found in dev runtime') + exit(1) + package_dir = os.path.dirname(manifest_path) + print(f'-- package_dir: {package_dir}') + print(f'-- manifest_path: {manifest_path}') + output_path = os.path.join(package_dir, "src", "weights.rs") + template = config['template'] + else: + default_path = f"./{config['path']}/src/weights" + xcm_path = f"./{config['path']}/src/weights/xcm" + output_path = default_path + if pallet.startswith("pallet_xcm_benchmarks"): + template = config['template'] + output_path = xcm_path + + print(f'-- benchmarking {pallet} in {runtime} into {output_path}') + cmd = f"frame-omni-bencher v1 benchmark pallet " \ + f"--extrinsic=* " \ + f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm " \ + f"--pallet={pallet} " \ + f"--header={header_path} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 " \ + f"--repeat=20 " \ + f"--heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes" + print(f'-- Running: {cmd} \n') + status = os.system(cmd) + if status != 0 and not args.continue_on_fail: + print(f'Failed to benchmark {pallet} in {runtime}') + sys.exit(1) + + # Otherwise collect failed benchmarks and print them at the end + # push failed pallets to failed_benchmarks + if status != 0: + failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] + else: + successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] + + if failed_benchmarks: + print('❌ Failed benchmarks of runtimes/pallets:') + for runtime, pallets in failed_benchmarks.items(): + print(f'-- {runtime}: {pallets}') + + if successful_benchmarks: + print('✅ Successful benchmarks of runtimes/pallets:') + for runtime, pallets in successful_benchmarks.items(): + print(f'-- {runtime}: {pallets}') + + elif args.command == 'fmt': + command = f"cargo +nightly fmt" + print(f'Formatting with `{command}`') + nightly_status = os.system(f'{command}') + taplo_status = os.system('taplo format --config .config/taplo.toml') + + if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: + print('❌ Failed to format code') + sys.exit(1) + + elif args.command == 'update-ui': + command = 'sh ./scripts/update-ui-tests.sh' + print(f'Updating ui with `{command}`') + status = os.system(f'{command}') + + if status != 0 and not args.continue_on_fail: + print('❌ Failed to format 
code') + sys.exit(1) + + print('🚀 Done') + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py new file mode 100644 index 000000000000..4cf1b290915d --- /dev/null +++ b/.github/scripts/cmd/test_cmd.py @@ -0,0 +1,321 @@ +import unittest +from unittest.mock import patch, mock_open, MagicMock, call +import json +import sys +import os +import argparse + +# Mock data for runtimes-matrix.json +mock_runtimes_matrix = [ + {"name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs"}, + {"name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, + {"name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, + {"name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs"}, +] + +def get_mock_bench_output(runtime, pallets, output_path, header, template = None): + return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ + f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--pallet={pallets} --header={header} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 --repeat=20 --heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes" + +class TestCmd(unittest.TestCase): + + def setUp(self): + self.patcher1 = patch('builtins.open', new_callable=mock_open, read_data=json.dumps(mock_runtimes_matrix)) + self.patcher2 = patch('json.load', return_value=mock_runtimes_matrix) + self.patcher3 = patch('argparse.ArgumentParser.parse_known_args') + self.patcher4 = patch('os.system', return_value=0) + self.patcher5 = patch('os.popen') + + self.mock_open = self.patcher1.start() + self.mock_json_load = self.patcher2.start() + self.mock_parse_args = self.patcher3.start() + self.mock_system = self.patcher4.start() + self.mock_popen = self.patcher5.start() + + # Ensure that cmd.py uses the mock_runtimes_matrix + import cmd + cmd.runtimesMatrix = mock_runtimes_matrix + + def tearDown(self): + self.patcher1.stop() + self.patcher2.stop() + self.patcher3.stop() + self.patcher4.stop() + self.patcher5.stop() + + def test_bench_command_normal_execution_all_runtimes(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), + pallet=['pallet_balances'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\n", # Output for dev runtime + "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime + "pallet_staking\npallet_something\n", # Output for rococo runtime - no pallet here + "pallet_balances\npallet_staking\npallet_something\n", # Output for asset-hub-westend runtime + "./substrate/frame/balances/Cargo.toml\n", # Mock manifest path for dev -> pallet_balances + ] + + with patch('sys.exit') as mock_exit: + import 
cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + + call(get_mock_bench_output('kitchensink', 'pallet_balances', './substrate/frame/balances/src/weights.rs', os.path.abspath('substrate/HEADER-APACHE2'), "substrate/.maintain/frame-weight-template.hbs")), + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', os.path.abspath('polkadot/file_header.txt'))), + # skips rococo benchmark + call(get_mock_bench_output('asset-hub-westend', 'pallet_balances', './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', os.path.abspath('cumulus/file_header.txt'))), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_normal_execution(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend'], + pallet=['pallet_balances', 'pallet_staking'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + header_path = os.path.abspath('polkadot/file_header.txt') + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + + # Westend runtime calls + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + + def test_bench_command_normal_execution_xcm(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend'], + pallet=['pallet_xcm_benchmarks::generic'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + header_path = os.path.abspath('polkadot/file_header.txt') + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n", # Output for westend runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + + # Westend runtime calls + call(get_mock_bench_output( + 'westend', + 'pallet_xcm_benchmarks::generic', + './polkadot/runtime/westend/src/weights/xcm', + header_path, + "polkadot/xcm/pallet-xcm-benchmarks/template.hbs" + )), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_two_runtimes_two_pallets(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['westend', 'rococo'], + pallet=['pallet_balances', 'pallet_staking'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + 
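        # Stub os.popen so each read() returns the --list output for the next runtime, in order. +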
self.mock_popen.return_value.read.side_effect = [ + "pallet_staking\npallet_balances\n", # Output for westend runtime + "pallet_staking\npallet_balances\n", # Output for rococo runtime + ] + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + header_path = os.path.abspath('polkadot/file_header.txt') + + expected_calls = [ + # Build calls + call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), + # Westend runtime calls + call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), + # Rococo runtime calls + call(get_mock_bench_output('rococo', 'pallet_staking', './polkadot/runtime/rococo/src/weights', header_path)), + call(get_mock_bench_output('rococo', 'pallet_balances', './polkadot/runtime/rococo/src/weights', header_path)), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_one_dev_runtime(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['dev'], + pallet=['pallet_balances'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + manifest_dir = "substrate/frame/kitchensink" + self.mock_popen.return_value.read.side_effect = [ + "pallet_balances\npallet_something", # Output for dev runtime + manifest_dir + "/Cargo.toml" # Output for manifest path in dev runtime + ] + header_path = os.path.abspath('substrate/HEADER-APACHE2') + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + # Westend runtime calls + call(get_mock_bench_output( + 'kitchensink', + 'pallet_balances', + manifest_dir + "/src/weights.rs", + header_path, + "substrate/.maintain/frame-weight-template.hbs" + )), + ] + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_one_cumulus_runtime(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['asset-hub-westend'], + pallet=['pallet_assets'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + self.mock_popen.return_value.read.side_effect = [ + "pallet_assets\n", # Output for asset-hub-westend runtime + ] + header_path = os.path.abspath('cumulus/file_header.txt') + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + # Asset-hub-westend runtime calls + call(get_mock_bench_output( + 'asset-hub-westend', + 'pallet_assets', + './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header_path + )), + ] + + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + def test_bench_command_one_cumulus_runtime_xcm(self): + self.mock_parse_args.return_value = (argparse.Namespace( + command='bench', + runtime=['asset-hub-westend'], + pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'], + continue_on_fail=False, + quiet=False, + clean=False, + image=None + ), []) + 
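        # A single --list read reports both requested pallets for the asset-hub-westend runtime. +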
self.mock_popen.return_value.read.side_effect = [ + "pallet_assets\npallet_xcm_benchmarks::generic\n", # Output for asset-hub-westend runtime + ] + header_path = os.path.abspath('cumulus/file_header.txt') + + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + + expected_calls = [ + # Build calls + call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + # Asset-hub-westend runtime calls + call(get_mock_bench_output( + 'asset-hub-westend', + 'pallet_xcm_benchmarks::generic', + './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm', + header_path, + "cumulus/templates/xcm-bench-template.hbs" + )), + call(get_mock_bench_output( + 'asset-hub-westend', + 'pallet_assets', + './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header_path + )), + ] + + self.mock_system.assert_has_calls(expected_calls, any_order=True) + + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), [])) + @patch('os.system', return_value=0) + def test_fmt_command(self, mock_system, mock_parse_args): + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + mock_system.assert_any_call('cargo +nightly fmt') + mock_system.assert_any_call('taplo format --config .config/taplo.toml') + + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), [])) + @patch('os.system', return_value=0) + def test_update_ui_command(self, mock_system, mock_parse_args): + with patch('sys.exit') as mock_exit: + import cmd + cmd.main() + mock_exit.assert_not_called() + mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh') + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/.github/workflows/cmd-tests.yml b/.github/workflows/cmd-tests.yml new file mode 100644 index 000000000000..87d7ee1dcc2d --- /dev/null +++ b/.github/workflows/cmd-tests.yml @@ -0,0 +1,14 @@ +name: Command Bot Tests + +on: + pull_request: + +permissions: + contents: read + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: python3 .github/scripts/cmd/test_cmd.py \ No newline at end of file diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index dfdf771a6105..79a4f6c3b19c 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -358,7 +358,7 @@ jobs: git config --local user.email "action@github.com" git config --local user.name "GitHub Action" - git pull origin ${{ needs.get-pr-branch.outputs.pr-branch }} + git pull --rebase origin ${{ needs.get-pr-branch.outputs.pr-branch }} git add . 
git restore --staged Cargo.lock # ignore changes in Cargo.lock git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index 45a3acd3f166..102437876daf 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -3,6 +3,8 @@ "name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", + "header": "substrate/HEADER-APACHE2", + "template": "substrate/.maintain/frame-weight-template.hbs", "uri": null, "is_relay": false }, @@ -10,6 +12,8 @@ "name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", "uri": "wss://try-runtime-westend.polkadot.io:443", "is_relay": true }, @@ -17,6 +21,8 @@ "name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", "uri": "wss://try-runtime-rococo.polkadot.io:443", "is_relay": true }, @@ -24,6 +30,8 @@ "name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -31,6 +39,8 @@ "name": "asset-hub-rococo", "package": "asset-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -38,6 +48,8 @@ "name": "bridge-hub-rococo", "package": "bridge-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/bridges/bridge-hub-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -45,6 +57,8 @@ "name": "bridge-hub-westend", "package": "bridge-hub-rococo-runtime", "path": "cumulus/parachains/runtimes/bridges/bridge-hub-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", "is_relay": false }, @@ -52,12 +66,16 @@ "name": "collectives-westend", "package": "collectives-westend-runtime", "path": "cumulus/parachains/runtimes/collectives/collectives-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-collectives-rpc.polkadot.io:443" }, { "name": "contracts-rococo", "package": "contracts-rococo-runtime", "path": "cumulus/parachains/runtimes/contracts/contracts-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", "is_relay": false }, @@ -65,6 +83,8 @@ "name": "coretime-rococo", "package": "coretime-rococo-runtime", "path": "cumulus/parachains/runtimes/coretime/coretime-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", "is_relay": false }, @@ -72,6 +92,8 @@ "name": "coretime-westend", "package": "coretime-westend-runtime", "path": 
"cumulus/parachains/runtimes/coretime/coretime-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-coretime-rpc.polkadot.io:443", "is_relay": false }, @@ -79,12 +101,17 @@ "name": "glutton-westend", "package": "glutton-westend-runtime", "path": "cumulus/parachains/runtimes/gluttons/glutton-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", + "uri": null, "is_relay": false }, { "name": "people-rococo", "package": "people-rococo-runtime", "path": "cumulus/parachains/runtimes/people/people-rococo", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://rococo-people-rpc.polkadot.io:443", "is_relay": false }, @@ -92,6 +119,8 @@ "name": "people-westend", "package": "people-westend-runtime", "path": "cumulus/parachains/runtimes/people/people-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", "uri": "wss://westend-people-rpc.polkadot.io:443", "is_relay": false } diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index e82c97160efc..55decef273f6 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,27 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 +//! DATE: 2024-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `8f4ffe8f7785`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --runtime=target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/balances/src/weights.rs // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./substrate/HEADER-APACHE2 -// --output=./substrate/frame/balances/src/weights.rs -// --template=./substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,8 +71,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. 
+ Weight::from_parts(77_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,10 +91,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,10 +102,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. + Weight::from_parts(31_293_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,10 +135,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_333_000 picoseconds. 
- Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -161,22 +161,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. + Weight::from_parts(31_917_000, 0) } } @@ -188,8 +188,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 47_552_000 picoseconds. - Weight::from_parts(48_363_000, 3593) + // Minimum execution time: 75_624_000 picoseconds. + Weight::from_parts(77_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -199,8 +199,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 37_565_000 picoseconds. - Weight::from_parts(38_159_000, 3593) + // Minimum execution time: 60_398_000 picoseconds. + Weight::from_parts(61_290_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -208,10 +208,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 14_147_000 picoseconds. - Weight::from_parts(14_687_000, 3593) + // Minimum execution time: 18_963_000 picoseconds. + Weight::from_parts(19_802_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -219,10 +219,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_188_000 picoseconds. - Weight::from_parts(19_929_000, 3593) + // Minimum execution time: 30_517_000 picoseconds. 
+ Weight::from_parts(31_293_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -230,10 +230,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `52` // Estimated: `6196` - // Minimum execution time: 48_903_000 picoseconds. - Weight::from_parts(49_944_000, 6196) + // Minimum execution time: 77_017_000 picoseconds. + Weight::from_parts(78_184_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -243,8 +243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) + // Minimum execution time: 75_600_000 picoseconds. + Weight::from_parts(76_817_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -252,10 +252,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 16_750_000 picoseconds. - Weight::from_parts(17_233_000, 3593) + // Minimum execution time: 24_503_000 picoseconds. + Weight::from_parts(25_026_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -266,10 +266,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_333_000 picoseconds. - Weight::from_parts(16_588_000, 990) - // Standard Error: 12_254 - .saturating_add(Weight::from_parts(13_973_659, 0).saturating_mul(u.into())) + // Minimum execution time: 24_077_000 picoseconds. + Weight::from_parts(24_339_000, 990) + // Standard Error: 18_669 + .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -278,21 +278,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_265_000 picoseconds. - Weight::from_parts(6_594_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_727_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 30_151_000 picoseconds. - Weight::from_parts(30_968_000, 0) + // Minimum execution time: 46_978_000 picoseconds. + Weight::from_parts(47_917_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 20_055_000 picoseconds. - Weight::from_parts(20_711_000, 0) + // Minimum execution time: 31_141_000 picoseconds. 
+		Weight::from_parts(31_917_000, 0)
 	}
 }

From fdb4554e26ebdd4d729158501a3ddb3c6ebdfb6f Mon Sep 17 00:00:00 2001
From: Liu-Cheng Xu
Date: Fri, 6 Sep 2024 16:21:09 +0800
Subject: [PATCH 4/7] Introduce `BlockGap` (#5592)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, block gaps could only be created by warp sync, but block
gaps will also be generated by fast sync once #5406 is fixed. This PR
is part 1 of the detailed implementation plan in
https://github.com/paritytech/polkadot-sdk/issues/5406#issuecomment-2325064863:
refactor `BlockGap`.

This refactor converts the existing `(NumberFor<Block>, NumberFor<Block>)`
into a dedicated `BlockGap<NumberFor<Block>>` struct. This change is purely
structural and does not alter existing logic, but lays the groundwork for
the follow-up PR. The compatibility concern caused by the new structure is
addressed in the second commit.

cc @dmitry-markin

---------

Co-authored-by: Bastian Köcher

---
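Note for reviewers (below the `---` marker, so `git am` ignores it): a self-contained
sketch of the shape of this refactor and of the decode fallback for pre-existing
databases. The block number is simplified to `u32` and the snippet assumes
`parity-scale-codec` with the `derive` feature; the real definitions are generic and
live in `sp-blockchain` and `sc-client-db`, as the diff below shows.

```rust
use codec::{Decode, Encode};

/// Simplified stand-in for the new `BlockGapType` in `sp-blockchain`.
#[derive(Debug, Clone, Copy, PartialEq, Encode, Decode)]
enum BlockGapType {
    MissingHeaderAndBody,
    MissingBody,
}

/// Simplified stand-in for the new `BlockGap<N>` (here `N = u32`).
#[derive(Debug, Clone, Copy, PartialEq, Encode, Decode)]
struct BlockGap {
    start: u32,
    end: u32,
    gap_type: BlockGapType,
}

fn main() {
    // Old representation: a bare (start, end) tuple that always implicitly
    // meant "header and body missing" (the warp-sync case).
    let old: (u32, u32) = (100, 200);

    // New representation: the same range plus an explicit gap type.
    let gap = BlockGap { start: old.0, end: old.1, gap_type: BlockGapType::MissingHeaderAndBody };

    // Call sites change from `|(s, e)| s <= n && n <= e`
    // to `|gap| gap.start <= n && n <= gap.end`.
    let n = 150u32;
    assert!(gap.start <= n && n <= gap.end);

    // DB compatibility: a legacy database holds only the encoded tuple and no
    // version key, so the loader decodes the tuple and fills in the gap type,
    // mirroring the `BLOCK_GAP_VERSION` fallback added to `utils.rs`.
    let legacy_bytes = old.encode();
    let (start, end) =
        <(u32, u32)>::decode(&mut legacy_bytes.as_slice()).expect("valid legacy encoding");
    let migrated = BlockGap { start, end, gap_type: BlockGapType::MissingHeaderAndBody };
    assert_eq!(migrated, gap);
}
```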
 prdoc/pr_5592.prdoc                           | 26 +++
 substrate/client/consensus/babe/src/lib.rs    |  6 +-
 substrate/client/db/src/lib.rs                | 73 ++++++++++++------
 substrate/client/db/src/utils.rs              | 47 ++++++++++--
 .../network/sync/src/strategy/chain_sync.rs   |  4 +-
 substrate/client/service/src/client/client.rs |  5 +-
 .../primitives/blockchain/src/backend.rs      | 40 ++++++++--
 7 files changed, 153 insertions(+), 48 deletions(-)
 create mode 100644 prdoc/pr_5592.prdoc

diff --git a/prdoc/pr_5592.prdoc b/prdoc/pr_5592.prdoc
new file mode 100644
index 000000000000..9d51917db7b1
--- /dev/null
+++ b/prdoc/pr_5592.prdoc
@@ -0,0 +1,26 @@
+title: Introduce `BlockGap`
+
+doc:
+  - audience: Node Dev
+    description: |
+      This is the first step towards https://github.com/paritytech/polkadot-sdk/issues/5406,
+      refactoring the representation of block gap. This refactor converts the existing
+      `(NumberFor<Block>, NumberFor<Block>)` into a dedicated `BlockGap<NumberFor<Block>>`
+      struct. This change is purely structural and does not alter existing logic, but lays
+      the groundwork for the follow-up PR. The compatibility concern in the database caused
+      by the new structure transition is addressed as well.
+
+      The `BlockGap` refactoring results in breaking changes in the `Info` structure returned
+      in `client.info()`.
+
+crates:
+  - name: sc-consensus-babe
+    bump: none
+  - name: sc-client-db
+    bump: none
+  - name: sc-network-sync
+    bump: none
+  - name: sc-service
+    bump: none
+  - name: sp-blockchain
+    bump: major

diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs
index 9770b16871e1..4cf66302ec85 100644
--- a/substrate/client/consensus/babe/src/lib.rs
+++ b/substrate/client/consensus/babe/src/lib.rs
@@ -1146,7 +1146,9 @@ where
 		let info = self.client.info();
 		let number = *block.header.number();

-		if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block.with_state() {
+		if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
+			block.with_state()
+		{
 			// Verification for imported blocks is skipped in two cases:
 			// 1. When importing blocks below the last finalized block during network initial
 			//    synchronization.
@@ -1420,7 +1422,7 @@ where
 		// Skip babe logic if block already in chain or importing blocks during initial sync,
 		// otherwise the check for epoch changes will error because trying to re-import an
 		// epoch change or because of missing epoch data in the tree, respectively.
-		if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) ||
+		if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
 			block_status == BlockStatus::InChain
 		{
 			// When re-importing existing block strip away intermediates.

diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index eadb26254a18..4559a01e57e3 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -61,6 +61,7 @@ use codec::{Decode, Encode};
 use hash_db::Prefix;
 use sc_client_api::{
 	backend::NewBlockState,
+	blockchain::{BlockGap, BlockGapType},
 	leaves::{FinalizationOutcome, LeafSet},
 	utils::is_descendent_of,
 	IoInfo, MemoryInfo, MemorySize, UsageInfo,
@@ -91,6 +92,7 @@ use sp_state_machine::{
 	StorageValue, UsageInfo as StateUsageInfo,
 };
 use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
+use utils::BLOCK_GAP_CURRENT_VERSION;

 // Re-export the Database trait so that one can pass an implementation of it.
 pub use sc_state_db::PruningMode;
@@ -522,7 +524,7 @@ impl<Block: BlockT> BlockchainDb<Block> {
 		}
 	}

-	fn update_block_gap(&self, gap: Option<(NumberFor<Block>, NumberFor<Block>)>) {
+	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
 		let mut meta = self.meta.write();
 		meta.block_gap = gap;
 	}
@@ -1671,35 +1673,56 @@ impl<Block: BlockT> Backend<Block> {
 			);
 		}

-		if let Some((mut start, end)) = block_gap {
-			if number == start {
-				start += One::one();
-				utils::insert_number_to_key_mapping(
-					&mut transaction,
-					columns::KEY_LOOKUP,
-					number,
-					hash,
-				)?;
-				if start > end {
-					transaction.remove(columns::META, meta_keys::BLOCK_GAP);
-					block_gap = None;
-					debug!(target: "db", "Removed block gap.");
-				} else {
-					block_gap = Some((start, end));
-					debug!(target: "db", "Update block gap. {block_gap:?}");
-					transaction.set(
-						columns::META,
-						meta_keys::BLOCK_GAP,
-						&(start, end).encode(),
-					);
-				}
-				block_gap_updated = true;
+		if let Some(mut gap) = block_gap {
+			match gap.gap_type {
+				BlockGapType::MissingHeaderAndBody =>
+					if number == gap.start {
+						gap.start += One::one();
+						utils::insert_number_to_key_mapping(
+							&mut transaction,
+							columns::KEY_LOOKUP,
+							number,
+							hash,
+						)?;
+						if gap.start > gap.end {
+							transaction.remove(columns::META, meta_keys::BLOCK_GAP);
+							transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
+							block_gap = None;
+							debug!(target: "db", "Removed block gap.");
+						} else {
+							block_gap = Some(gap);
+							debug!(target: "db", "Update block gap. {block_gap:?}");
+							transaction.set(
+								columns::META,
+								meta_keys::BLOCK_GAP,
+								&gap.encode(),
+							);
+							transaction.set(
+								columns::META,
+								meta_keys::BLOCK_GAP_VERSION,
+								&BLOCK_GAP_CURRENT_VERSION.encode(),
+							);
+						}
+						block_gap_updated = true;
+					},
+				BlockGapType::MissingBody => {
+					unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406")
+				},
+			}
 		} else if number > best_num + One::one() &&
 			number > One::one() &&
 			self.blockchain.header(parent_hash)?.is_none()
 		{
-			let gap = (best_num + One::one(), number - One::one());
+			let gap = BlockGap {
+				start: best_num + One::one(),
+				end: number - One::one(),
+				gap_type: BlockGapType::MissingHeaderAndBody,
+			};
 			transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode());
+			transaction.set(
+				columns::META,
+				meta_keys::BLOCK_GAP_VERSION,
+				&BLOCK_GAP_CURRENT_VERSION.encode(),
+			);
 			block_gap = Some(gap);
 			block_gap_updated = true;
 			debug!(target: "db", "Detected block gap {block_gap:?}");

diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs
index b532e0d46662..0b591c967e60 100644
--- a/substrate/client/db/src/utils.rs
+++ b/substrate/client/db/src/utils.rs
@@ -25,10 +25,14 @@ use log::{debug, info};

 use crate::{Database, DatabaseSource, DbHash};
 use codec::Decode;
+use sc_client_api::blockchain::{BlockGap, BlockGapType};
 use sp_database::Transaction;
 use sp_runtime::{
 	generic::BlockId,
-	traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero},
+	traits::{
+		Block as BlockT, Header as HeaderT, NumberFor, UniqueSaturatedFrom, UniqueSaturatedInto,
+		Zero,
+	},
 };
 use sp_trie::DBValue;

@@ -38,6 +42,9 @@ pub const NUM_COLUMNS: u32 = 13;
 /// Meta column. The set of keys in the column is shared by full && light storages.
 pub const COLUMN_META: u32 = 0;

+/// Current block gap version.
+pub const BLOCK_GAP_CURRENT_VERSION: u32 = 1;
+
 /// Keys of entries in COLUMN_META.
 pub mod meta_keys {
 	/// Type of storage (full or light).
@@ -50,6 +57,8 @@ pub mod meta_keys {
 	pub const FINALIZED_STATE: &[u8; 6] = b"fstate";
 	/// Block gap.
 	pub const BLOCK_GAP: &[u8; 3] = b"gap";
+	/// Block gap version.
+	pub const BLOCK_GAP_VERSION: &[u8; 7] = b"gap_ver";
 	/// Genesis block hash.
 	pub const GENESIS_HASH: &[u8; 3] = b"gen";
 	/// Leaves prefix list key.
@@ -73,8 +82,8 @@ pub struct Meta<N, H> {
 	pub genesis_hash: H,
 	/// Finalized state, if any
 	pub finalized_state: Option<(H, N)>,
-	/// Block gap, start and end inclusive, if any.
-	pub block_gap: Option<(N, N)>,
+	/// Block gap, if any.
+	pub block_gap: Option<BlockGap<N>>,
 }

 /// A block lookup key: used for canonical lookup from block number to hash
@@ -197,7 +206,7 @@ fn open_database_at<Block: BlockT>(
 			open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?,
 		DatabaseSource::Custom { db, require_create_flag } => {
 			if *require_create_flag && !create {
-				return Err(OpenDbError::DoesNotExist)
+				return Err(OpenDbError::DoesNotExist);
 			}
 			db.clone()
 		},
@@ -364,7 +373,7 @@ pub fn check_database_type(
 				return Err(OpenDbError::UnexpectedDbType {
 					expected: db_type,
 					found: stored_type.to_owned(),
-				})
+				});
 			},
 		None => {
 			let mut transaction = Transaction::new();
@@ -515,9 +524,31 @@ where
 	} else {
 		None
 	};
-	let block_gap = db
-		.get(COLUMN_META, meta_keys::BLOCK_GAP)
-		.and_then(|d| Decode::decode(&mut d.as_slice()).ok());
+	let block_gap = match db
+		.get(COLUMN_META, meta_keys::BLOCK_GAP_VERSION)
+		.and_then(|d| u32::decode(&mut d.as_slice()).ok())
+	{
+		None => {
+			let old_block_gap: Option<(NumberFor<Block>, NumberFor<Block>)> = db
+				.get(COLUMN_META, meta_keys::BLOCK_GAP)
+				.and_then(|d| Decode::decode(&mut d.as_slice()).ok());
+
+			old_block_gap.map(|(start, end)| BlockGap {
+				start,
+				end,
+				gap_type: BlockGapType::MissingHeaderAndBody,
+			})
+		},
+		Some(version) => match version {
+			BLOCK_GAP_CURRENT_VERSION => db
+				.get(COLUMN_META, meta_keys::BLOCK_GAP)
+				.and_then(|d| Decode::decode(&mut d.as_slice()).ok()),
+			v =>
+				return Err(sp_blockchain::Error::Backend(format!(
+					"Unsupported block gap DB version: {v}"
+				))),
+		},
+	};
 	debug!(target: "db", "block_gap={:?}", block_gap);

 	Ok(Meta {

diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs
index 21e474048625..f29ed1b083e8 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync.rs
@@ -44,7 +44,7 @@ use crate::{
 use codec::Encode;
 use log::{debug, error, info, trace, warn};
 use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};
-use sc_client_api::{BlockBackend, ProofProvider};
+use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider};
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
 use sc_network_common::sync::message::{
 	BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock,
@@ -1381,7 +1381,7 @@ where
 			}
 		}

-		if let Some((start, end)) = info.block_gap {
+		if let Some(BlockGap { start, end, .. }) = info.block_gap {
 			debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}");
 			self.gap_sync = Some(GapSync {
 				best_queued_number: start - One::one(),

diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index 22defd7c5514..8b699c7faffd 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -604,9 +604,8 @@ where
 		}

 		let info = self.backend.blockchain().info();
-		let gap_block = info
-			.block_gap
-			.map_or(false, |(start, _)| *import_headers.post().number() == start);
+		let gap_block =
+			info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start);

 		// the block is lower than our last finalized block so it must revert
 		// finality, refusing import.

diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs
index fd0c5795cbfd..d7386a71a0d1 100644
--- a/substrate/primitives/blockchain/src/backend.rs
+++ b/substrate/primitives/blockchain/src/backend.rs
@@ -17,6 +17,7 @@

 //! Substrate blockchain trait

+use codec::{Decode, Encode};
 use parking_lot::RwLock;
 use sp_runtime::{
 	generic::BlockId,
@@ -109,7 +110,7 @@ pub trait ForkBackend<Block: BlockT>:
 			for block in tree_route.retracted() {
 				expanded_forks.insert(block.hash);
 			}
-			continue
+			continue;
 		},
 		Err(_) => {
 			// There are cases when blocks are missing (e.g. warp-sync).
@@ -196,7 +197,7 @@ pub trait Backend<Block: BlockT>:
 		let info = self.info();
 		if info.finalized_number > *base_header.number() {
 			// `base_header` is on a dead fork.
-			return Ok(None)
+			return Ok(None);
 		}
 		self.leaves()?
 	};
@@ -207,7 +208,7 @@ pub trait Backend<Block: BlockT>:
 	// go backwards through the chain (via parent links)
 	loop {
 		if current_hash == base_hash {
-			return Ok(Some(leaf_hash))
+			return Ok(Some(leaf_hash));
 		}

 		let current_header = self
@@ -216,7 +217,7 @@ pub trait Backend<Block: BlockT>:
 		// stop search in this chain once we go below the target's block number
 		if current_header.number() < base_header.number() {
-			break
+			break;
 		}

 		current_hash = *current_header.parent_hash();
@@ -266,7 +267,7 @@ pub trait Backend<Block: BlockT>:

 		// If we have only one leaf there are no forks, and we can return early.
 		if finalized_block_number == Zero::zero() || leaves.len() == 1 {
-			return Ok(DisplacedLeavesAfterFinalization::default())
+			return Ok(DisplacedLeavesAfterFinalization::default());
 		}

 		// Store hashes of finalized blocks for quick checking later, the last block is the
@@ -332,7 +333,7 @@ pub trait Backend<Block: BlockT>:
 					elapsed = ?now.elapsed(),
 					"Added genesis leaf to displaced leaves."
 				);
-				continue
+				continue;
 			}

 			debug!(
@@ -539,6 +540,29 @@ impl<Block: BlockT> DisplacedLeavesAfterFinalization<Block> {
 	}
 }

+/// Represents the type of block gaps that may result from either warp sync or fast sync.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
+pub enum BlockGapType {
+	/// Both the header and body are missing, as a result of warp sync.
+	MissingHeaderAndBody,
+	/// The block body is missing, as a result of fast sync.
+	MissingBody,
+}
+
+/// Represents a block gap resulting from warp sync or fast sync.
+///
+/// A block gap is a range of blocks where either the bodies, or both headers and bodies are
+/// missing.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
+pub struct BlockGap<N> {
+	/// The starting block number of the gap (inclusive).
+	pub start: N,
+	/// The ending block number of the gap (inclusive).
+	pub end: N,
+	/// The type of gap.
+	pub gap_type: BlockGapType,
+}
+
 /// Blockchain info
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub struct Info<Block: BlockT> {
@@ -556,8 +580,8 @@ pub struct Info<Block: BlockT> {
 	pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
 	/// Number of concurrent leave forks.
 	pub number_leaves: usize,
-	/// Missing blocks after warp sync. (start, end).
-	pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
+	/// Missing blocks after warp sync or fast sync.
+	pub block_gap: Option<BlockGap<NumberFor<Block>>>,
 }

 /// Block status.

From 76df1ae460fb2f9910051e0dac2211ab8d156ced Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Fri, 6 Sep 2024 10:29:26 +0200
Subject: [PATCH 5/7] [CI/Release] Pipeline to create a stable release branch
 (#5598)

This PR contains a pipeline which is going to branch off the new stable
release branch (e.g. `stable2412`, `stable2503`), bump the `polkadot`
`NODE_VERSION` and the `spec_version` of the runtimes, and reorganise
the `prdocs` related to the new stable release.

This is a first step in the automated `polkadot-sdk` release flow as
part of the task:
https://github.com/paritytech/polkadot-sdk/issues/3291

The pipeline is not supposed to be triggered in the main `polkadot-sdk`
repo, but in the fork in the
[`paritytech-release`](https://github.com/paritytech-release/polkadot-sdk)
org, where the whole release flow is going to land.

Closes: https://github.com/paritytech/release-engineering/issues/222

---
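Note for reviewers (below the `---` marker, so `git am` ignores it): a runnable sketch
of the two helpers the branch-off step leans on, `set_version` and `get_spec_version`.
The `/tmp` file and the simplified `NODE_VERSION` declaration are illustrative
assumptions; the `sed` expressions are the ones used in `release_lib.sh` below.

```bash
#!/usr/bin/env bash
set -euo pipefail

# set_version() keeps whatever the escaped \( \) group in the pattern captured
# and swaps only the quoted value behind it.
printf '%s\n' 'pub const NODE_VERSION: &str = "1.15.0";' > /tmp/node_primitives_lib.rs
pattern='\(NODE_VERSION[^=]*= \)".*"'
sed -i "s/$pattern/\1\"1.16.0\"/g" /tmp/node_primitives_lib.rs
cat /tmp/node_primitives_lib.rs  # -> pub const NODE_VERSION: &str = "1.16.0";

# get_spec_version() maps a node version to a runtime spec_version in two steps:
#   "1.16.0" --(s/\./_0/g)--> "1_016_00" --(s/_[^_]*$/_000/)--> "1_016_000"
SUFFIX=000
VERSION="1.16.0"
SPEC_VERSION="$(sed -e "s/\./_0/g" -e "s/_[^_]*\$/_$SUFFIX/" <<< "$VERSION")"
echo "$SPEC_VERSION"  # -> 1_016_000
```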
 .github/scripts/common/lib.sh              |  12 +-
 .github/scripts/release/release_lib.sh     | 118 ++++++++++++++++++
 .../workflows/release-branchoff-stable.yml | 105 ++++++++++++++++
 3 files changed, 229 insertions(+), 6 deletions(-)
 create mode 100644 .github/scripts/release/release_lib.sh
 create mode 100644 .github/workflows/release-branchoff-stable.yml

diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh
index bfb3120ad9bb..5361db398ae7 100755
--- a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -299,23 +299,23 @@ function check_sha256() {
 }

 # Import GPG keys of the release team members
-# This is done in parallel as it can take a while sometimes
 function import_gpg_keys() {
-  GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"}
+  GPG_KEYSERVER=${GPG_KEYSERVER:-"hkps://keyserver.ubuntu.com"}
   SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798"
   EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3"
   MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF"
+  PARITY_RELEASES="90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE"

-  echo "Importing GPG keys from $GPG_KEYSERVER in parallel"
-  for key in $SEC $EGOR $MORGAN; do
+  echo "Importing GPG keys from $GPG_KEYSERVER"
+  for key in $SEC $EGOR $MORGAN $PARITY_RELEASES; do
     (
       echo "Importing GPG key $key"
       gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key
       echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust;
-    ) &
+    )
   done
   wait
-  gpg -k $SEC
+  gpg -k
 }

 # Check the GPG signature for a given binary

diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
new file mode 100644
index 000000000000..81a3c14edec8
--- /dev/null
+++ b/.github/scripts/release/release_lib.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+# Set the new version by replacing the value of the constant given as pattern
+# in the file.
+#
+# input: pattern, version, file
+# output: none
+set_version() {
+    pattern=$1
+    version=$2
+    file=$3
+
+    sed -i "s/$pattern/\1\"${version}\"/g" $file
+    return 0
+}
+
+# Commit changes to git with a specific message.
+# "|| true" keeps the script from failing with exit code 1
+# in case there is nothing to commit.
+#
+# input: MESSAGE (any message which should be used for the commit)
+# output: none
+commit_with_message() {
+    MESSAGE=$1
+    git commit -a -m "$MESSAGE" || true
+}
+
+# Return the list of the runtimes, filtered
+# input: none
+# output: list of filtered runtimes
+get_filtered_runtimes_list() {
+    grep_filters=("runtime.*" "test|template|starters|substrate")
+
+    git grep spec_version: | grep .rs: | grep -e "${grep_filters[0]}" | grep "lib.rs" | grep -vE "${grep_filters[1]}" | cut -d: -f1
+}
+
+# Sets the provided spec version
+# input: version
+set_spec_versions() {
+    NEW_VERSION=$1
+    runtimes_list=(${@:2})
+
+    printf "Setting spec_version to $NEW_VERSION\n"
+
+    for f in ${runtimes_list[@]}; do
+        printf "  processing $f"
+        sed -ri "s/spec_version: [0-9]+_[0-9]+_[0-9]+,/spec_version: $NEW_VERSION,/" $f
+    done
+
+    commit_with_message "Bump spec_version to $NEW_VERSION"
+
+    git_show_log 'spec_version'
+}
+
+# Displays formatted results of the git log command
+# for the given pattern which needs to be found in logs
+# input: pattern, count (optional, default is 10)
+git_show_log() {
+    PATTERN="$1"
+    COUNT=${2:-10}
+    git log --pretty=format:"%h %ad | %s%d [%an]" --graph --date=iso-strict | \
+        head -n $COUNT | grep -iE "$PATTERN" --color=always -z
+}
+
+# Get a spec_version number from the crate version
+#
+# ## inputs
+#  - v1.12.0 or 1.12.0
+#
+# ## output:
+# 1_012_000 or 1_012_001 if SUFFIX is set
+function get_spec_version() {
+    INPUT=$1
+    SUFFIX=${SUFFIX:-000} # makes it possible to set a specific runtime version, like 93826; it can be initialised as a system variable
+    [[ $INPUT =~ .*([0-9]+\.[0-9]+\.[0-9]{1,2}).* ]]
+    VERSION="${BASH_REMATCH[1]}"
+    MATCH="${BASH_REMATCH[0]}"
+    if [ -z $MATCH ]; then
+        return 1
+    else
+        SPEC_VERSION="$(sed -e "s/\./_0/g" -e "s/_[^_]*\$/_$SUFFIX/" <<< $VERSION)"
+        echo "$SPEC_VERSION"
+        return 0
+    fi
+}
+
+# Reorganize the prdoc files for the release
+#
+# input: VERSION (e.g. v1.0.0)
+# output: none
+reorder_prdocs() {
+    VERSION="$1"
+
+    printf "[+] ℹ️ Reordering prdocs:"
+
+    VERSION=$(sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/' <<< "$VERSION") # getting rid of the 'v' prefix
+    mkdir -p "prdoc/$VERSION"
+    mv prdoc/pr_*.prdoc prdoc/$VERSION
+    git add -A
+    commit_with_message "Reordering prdocs for the release $VERSION"
+}
+
+# Bump the binary version of the polkadot-parachain binary with the
+# new bumped version and commit changes.
+#
+# input: version, e.g. 1.16.0
+set_polkadot_parachain_binary_version() {
+    bumped_version="$1"
+    cargo_toml_file="$2"
+
+    set_version "\(^version = \)\".*\"" $bumped_version $cargo_toml_file
+
+    cargo update --workspace --offline # we need this to update Cargo.lock with the new versions as well
+
+    MESSAGE="Bump versions in: ${cargo_toml_file}"
+    commit_with_message "$MESSAGE"
+    git_show_log "$MESSAGE"
+}

diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-branchoff-stable.yml
new file mode 100644
index 000000000000..c236a66a9fae
--- /dev/null
+++ b/.github/workflows/release-branchoff-stable.yml
@@ -0,0 +1,105 @@
+name: Release - Branch off stable branch
+
+on:
+  workflow_dispatch:
+    inputs:
+      stable_version:
+        description: New stable version in the format stableYYMM
+        required: true
+        type: string
+
+      node_version:
+        description: Version of the polkadot node in the format vX.XX.X (e.g. 1.15.0)
+        required: true
+
+jobs:
+  # TODO: Activate this job when the pipeline is moved to the fork in the `paritytech-release` org
+  # check-workflow-can-run:
+  #   uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@latest
+
+  prepare-tooling:
+    runs-on: ubuntu-latest
+    outputs:
+      node_version: ${{ steps.validate_inputs.outputs.node_version }}
+      stable_version: ${{ steps.validate_inputs.outputs.stable_version }}
+
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Validate inputs
+        id: validate_inputs
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          node_version=$(filter_version_from_input "${{ inputs.node_version }}")
+          echo "node_version=${node_version}" >> $GITHUB_OUTPUT
+
+          stable_version=$(validate_stable_tag ${{ inputs.stable_version }})
+          echo "stable_version=${stable_version}" >> $GITHUB_OUTPUT
+
+  create-stable-branch:
+    # needs: [check-workflow-can-run, prepare-tooling]
+    needs: [prepare-tooling]
+    # if: needs.check-workflow-can-run.outputs.checks_passed == 'true'
+    runs-on: ubuntu-latest
+
+    env:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      STABLE_BRANCH_NAME: ${{ needs.prepare-tooling.outputs.stable_version }}
+
+    steps:
+      - name: Install pgpkms
+        run: |
+          # Install pgpkms that is used to sign commits
+          pip install git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69
+
+      - name: Checkout sources
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        with:
+          ref: master
+
+      - name: Import gpg keys
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          import_gpg_keys
+
+      - name: Config git
+        run: |
+          git config --global commit.gpgsign true
+          git config --global gpg.program /home/runner/.local/bin/pgpkms-git
+          git config --global user.name "ParityReleases"
+          git config --global user.email "release-team@parity.io"
+          git config --global user.signingKey "90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE"
+
+      - name: Create stable branch
+        run: |
+          git checkout -b "$STABLE_BRANCH_NAME"
+          git show-ref "$STABLE_BRANCH_NAME"
+
+      - name: Bump versions, reorder prdocs and push stable branch
+        run: |
+          . ./.github/scripts/release/release_lib.sh
+
+          NODE_VERSION="${{ needs.prepare-tooling.outputs.node_version }}"
+          set_version "\(NODE_VERSION[^=]*= \)\".*\"" $NODE_VERSION "polkadot/node/primitives/src/lib.rs"
+          commit_with_message "Bump node version to $NODE_VERSION in polkadot-cli"
+
+          SPEC_VERSION=$(get_spec_version $NODE_VERSION)
+          runtimes_list=$(get_filtered_runtimes_list)
+          set_spec_versions $SPEC_VERSION "${runtimes_list[@]}"
+
+          # TODO: clarify what to do with the polkadot-parachain binary
+          # Set new version for polkadot-parachain binary to match the polkadot node binary
+          # set_polkadot_parachain_binary_version $NODE_VERSION "cumulus/polkadot-parachain/Cargo.toml"
+
+          reorder_prdocs $NODE_VERSION
+
+          git push origin "$STABLE_BRANCH_NAME"

From 986e7ae4f29f804ee4dc89aaf52984d6eda5bd0b Mon Sep 17 00:00:00 2001
From: Radha <86818441+DrW3RK@users.noreply.github.com>
Date: Fri, 6 Sep 2024 10:30:49 +0200
Subject: [PATCH 6/7] Update Templates Readme - Github Repo links (#5381)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When someone downloads the Polkadot SDK repo and navigates to the
templates folder, the Readme instructions do not work. The
getting-started script in the Polkadot SDK readme can be overlooked
(and it covers only the minimal template, not the parachain/solochain
templates).

The instructions in the Readme files are updated so that they work for
anyone coming from:

https://github.com/paritytech/polkadot-sdk
https://github.com/paritytech/polkadot-sdk-minimal-template
https://github.com/paritytech/polkadot-sdk-parachain-template
https://github.com/paritytech/polkadot-sdk-solochain-template

---------

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Co-authored-by: Bastian Köcher

---
 templates/minimal/README.md   |  8 ++++++++
 templates/parachain/README.md |  8 ++++++++
 templates/solochain/README.md | 10 +++++++++-
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/templates/minimal/README.md b/templates/minimal/README.md
index 180c229e744e..fe1317a033c7 100644
--- a/templates/minimal/README.md
+++ b/templates/minimal/README.md
@@ -37,6 +37,14 @@ A Polkadot SDK based project such as this one consists of:
 * 🛠️ Depending on your operating system and Rust version, there might be additional
   packages required to compile this template - please take note of the Rust compiler output.

+Fetch minimal template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-minimal-template.git minimal-template
+
+cd minimal-template
+```
+
 ### Build 🔨

 Use the following command to build the node without launching it:

diff --git a/templates/parachain/README.md b/templates/parachain/README.md
index b912d8e005c7..3de85cbeb4dc 100644
--- a/templates/parachain/README.md
+++ b/templates/parachain/README.md
@@ -39,6 +39,14 @@ A Polkadot SDK based project such as this one consists of:
 * 🛠️ Depending on your operating system and Rust version, there might be additional
   packages required to compile this template - please take note of the Rust compiler output.
+Fetch parachain template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-parachain-template.git parachain-template
+
+cd parachain-template
+```
+
 ### Build 🔨

 Use the following command to build the node without launching it:

diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 6a5a7853f9c0..c4ce5c7f3fbb 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -23,9 +23,17 @@ packages required to compile this template. Check the
 the most common dependencies. Alternatively, you can use one of the [alternative
 installation](#alternatives-installations) options.

+Fetch solochain template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-solochain-template.git solochain-template
+
+cd solochain-template
+```
+
 ### Build

-Use the following command to build the node without launching it:
+🔨 Use the following command to build the node without launching it:

 ```sh
 cargo build --release

From 5040b3c2186308a06bad408643a5e475df4cfeeb Mon Sep 17 00:00:00 2001
From: Andrei Eres
Date: Fri, 6 Sep 2024 13:29:16 +0200
Subject: [PATCH 7/7] Fix PVF precompilation for Kusama (#5606)

![image](https://github.com/user-attachments/assets/2deaee85-67c3-4119-b0c0-d2e7f818b4ea)

Because `validators.len() < discovery_keys.len()` on Kusama, we can
tweak the PVF precompilation to allow preparing PVFs when the node is
an authority but not a validator.

---
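Note for reviewers (below the `---` marker, so `git am` ignores it): a minimal sketch
of the decision this patch changes. The names are simplified stand-ins for the
keystore checks in `maybe_prepare_validation`; the point is that the "am I validating
right now?" test now looks at `SessionInfo::validators` rather than `discovery_keys`,
because on Kusama the discovery set is a superset of the active validator set.

```rust
/// Simplified stand-in for a session key held in the keystore.
#[derive(Clone, Copy, PartialEq)]
struct Key(u8);

/// Precompile PVFs only for an authority that is not actively validating yet.
fn should_precompile(
    past_present_or_future_authorities: &[Key], // discovery keys across sessions
    current_validators: &[Key],                 // `SessionInfo::validators`
    our_keys: &[Key],                           // what our keystore holds
) -> bool {
    let is_authority =
        past_present_or_future_authorities.iter().any(|k| our_keys.contains(k));
    // Checking `validators` instead of `discovery_keys` is the actual fix.
    let is_active_validator = current_validators.iter().any(|k| our_keys.contains(k));
    is_authority && !is_active_validator
}

fn main() {
    let authorities = [Key(1), Key(2), Key(3)]; // discovery set (superset on Kusama)
    let validators = [Key(1), Key(2)];          // active validators this session

    // Key(3) is an authority but not an active validator: precompile ahead of time.
    assert!(should_precompile(&authorities, &validators, &[Key(3)]));
    // Key(1) already validates; it compiles PVFs on demand anyway.
    assert!(!should_precompile(&authorities, &validators, &[Key(1)]));
}
```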
 .../node/core/candidate-validation/src/lib.rs | 11 +++++-----
 .../core/candidate-validation/src/tests.rs    | 21 +++++++++----------
 prdoc/pr_5606.prdoc                           | 13 ++++++++++++
 3 files changed, 29 insertions(+), 16 deletions(-)
 create mode 100644 prdoc/pr_5606.prdoc

diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 103d29e8d269..a9732e934414 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -47,7 +47,7 @@ use polkadot_primitives::{
 	},
 	AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptor, CandidateEvent,
 	CandidateReceipt, ExecutorParams, Hash, OccupiedCoreAssumption, PersistedValidationData,
-	PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash,
+	PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId,
 };
 use sp_application_crypto::{AppCrypto, ByteArray};
 use sp_keystore::KeystorePtr;
@@ -427,14 +427,15 @@ where
 		.iter()
 		.any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)]));

-	let is_present_authority = session_info
-		.discovery_keys
+	// We could've checked discovery_keys but on Kusama validators.len() < discovery_keys.len().
+	let is_present_validator = session_info
+		.validators
 		.iter()
-		.any(|v| keystore.has_keys(&[(v.to_raw_vec(), AuthorityDiscoveryId::ID)]));
+		.any(|v| keystore.has_keys(&[(v.to_raw_vec(), ValidatorId::ID)]));

 	// There is still a chance to be a previous session authority, but this extra work does not
 	// affect the finalization.
-	is_past_present_or_future_authority && !is_present_authority
+	is_past_present_or_future_authority && !is_present_validator
 }

 // Sends PVF with unknown code hashes to the validation host returning the list of code hashes sent.

diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index 55282fdf4ee1..0dcd84bab6cf 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -25,13 +25,12 @@ use polkadot_node_subsystem::messages::AllMessages;
 use polkadot_node_subsystem_util::reexports::SubsystemContext;
 use polkadot_overseer::ActivatedLeaf;
 use polkadot_primitives::{
-	CoreIndex, GroupIndex, HeadData, Id as ParaId, IndexedVec, SessionInfo, UpwardMessage,
-	ValidatorId, ValidatorIndex,
+	CoreIndex, GroupIndex, HeadData, Id as ParaId, SessionInfo, UpwardMessage, ValidatorId,
 };
 use polkadot_primitives_test_helpers::{
 	dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor,
 };
-use sp_core::testing::TaskExecutor;
+use sp_core::{sr25519::Public, testing::TaskExecutor};
 use sp_keyring::Sr25519Keyring;
 use sp_keystore::{testing::MemoryKeystore, Keystore};

@@ -1194,10 +1193,10 @@ fn dummy_candidate_backed(
 	)
 }

-fn dummy_session_info(discovery_keys: Vec<AuthorityDiscoveryId>) -> SessionInfo {
+fn dummy_session_info(keys: Vec<Public>) -> SessionInfo {
 	SessionInfo {
-		validators: IndexedVec::<ValidatorIndex, ValidatorId>::from(vec![]),
-		discovery_keys,
+		validators: keys.iter().cloned().map(Into::into).collect(),
+		discovery_keys: keys.iter().cloned().map(Into::into).collect(),
 		assignment_keys: vec![],
 		validator_groups: Default::default(),
 		n_cores: 4u32,
@@ -1246,7 +1245,7 @@ fn maybe_prepare_validation_golden_path() {
 			ctx_handle.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => {
 				assert_eq!(index, 1);
-				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()]))));
+				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()]))));
 			}
 		);

@@ -1364,7 +1363,7 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() {
 			ctx_handle.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => {
 				assert_eq!(index, 2);
-				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()]))));
+				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()]))));
 			}
 		);
 	};
@@ -1510,7 +1509,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next
 			ctx_handle.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => {
 				assert_eq!(index, 1);
-				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()]))));
+				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()]))));
 			}
 		);
 	};
@@ -1557,7 +1556,7 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_
 			ctx_handle.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => {
 				assert_eq!(index, 1);
-				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public().into()]))));
+				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Alice.public()]))));
 			}
 		);
 	};
@@ -1604,7 +1603,7 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() {
 			ctx_handle.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionInfo(index, tx))) => {
 				assert_eq!(index, 1);
-				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public().into()]))));
+				let _ = tx.send(Ok(Some(dummy_session_info(vec![Sr25519Keyring::Bob.public()]))));
 			}
 		);

diff --git a/prdoc/pr_5606.prdoc b/prdoc/pr_5606.prdoc
new file mode 100644
index 000000000000..46883c5722cd
--- /dev/null
+++ b/prdoc/pr_5606.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix PVF precompilation for Kusama
+
+doc:
+  - audience: Node Operator
+    description: |
+      Tweaks the PVF precompilation on Kusama to allow preparing PVFs when the node is
+      an authority but not a validator.
+
+crates:
+  - name: polkadot-node-core-candidate-validation
+    bump: patch